2024-12-13 00:56:16,876 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-13 00:56:16,906 main DEBUG Took 0.027957 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-13 00:56:16,907 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-13 00:56:16,907 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-13 00:56:16,909 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-13 00:56:16,911 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,939 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-13 00:56:16,981 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,983 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,985 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,985 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,989 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,993 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,996 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:16,999 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:16,999 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,001 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,001 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-13 00:56:17,003 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,005 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,005 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,006 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,008 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,010 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-13 00:56:17,012 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,012 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-13 00:56:17,014 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-13 00:56:17,016 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-13 00:56:17,033 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-13 00:56:17,034 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
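The LoggerConfig$Builder entries above are Log4j2 assembling per-package log levels from the test's log4j2.properties: ERROR for org.apache.zookeeper, WARN for org.apache.hadoop, DEBUG for org.apache.hadoop.hbase, and a root logger of INFO routed to the Console appender. A minimal sketch of what those levels mean for the output that follows, using the standard Log4j2 API; the logger names and levels are taken from the log, the surrounding code is illustrative only:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LoggerLevelSketch {
    public static void main(String[] args) {
        // Resolved against the configuration built above.
        Logger zk = LogManager.getLogger("org.apache.zookeeper");       // configured at ERROR
        Logger hbase = LogManager.getLogger("org.apache.hadoop.hbase"); // configured at DEBUG
        System.out.println(zk.isDebugEnabled());    // false: ZooKeeper DEBUG/INFO is suppressed
        System.out.println(hbase.isDebugEnabled()); // true: hence the hbase DEBUG lines later in this log
    }
}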
2024-12-13 00:56:17,035 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-13 00:56:17,036 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-13 00:56:17,073 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-13 00:56:17,077 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-13 00:56:17,079 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-13 00:56:17,080 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-13 00:56:17,080 main DEBUG createAppenders(={Console}) 2024-12-13 00:56:17,081 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-13 00:56:17,082 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-13 00:56:17,082 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-13 00:56:17,083 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-13 00:56:17,083 main DEBUG OutputStream closed 2024-12-13 00:56:17,084 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-13 00:56:17,084 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-13 00:56:17,085 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-13 00:56:17,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-13 00:56:17,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-13 00:56:17,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-13 00:56:17,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-13 00:56:17,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-13 00:56:17,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-13 00:56:17,276 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-13 00:56:17,276 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-13 00:56:17,277 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-13 00:56:17,277 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-13 00:56:17,278 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-13 00:56:17,278 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-13 00:56:17,279 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-13 00:56:17,279 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-13 00:56:17,279 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-13 00:56:17,280 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-13 00:56:17,280 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-13 00:56:17,282 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-13 00:56:17,295 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-13 00:56:17,296 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-13 00:56:17,296 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-13 00:56:17,297 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-13T00:56:17,315 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-13 00:56:17,319 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-13 00:56:17,320 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-13T00:56:17,907 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e 2024-12-13T00:56:17,908 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-13T00:56:18,021 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-13T00:56:18,285 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T00:56:18,287 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446, deleteOnExit=true 2024-12-13T00:56:18,287 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T00:56:18,288 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/test.cache.data in system properties and HBase conf 2024-12-13T00:56:18,289 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T00:56:18,290 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir in system properties and HBase conf 2024-12-13T00:56:18,291 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T00:56:18,291 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T00:56:18,292 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T00:56:18,391 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-13T00:56:18,395 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T00:56:18,396 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T00:56:18,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T00:56:18,397 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T00:56:18,398 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T00:56:18,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T00:56:18,399 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T00:56:18,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T00:56:18,400 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T00:56:18,401 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/nfs.dump.dir in system properties and HBase conf 2024-12-13T00:56:18,402 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir in system properties and HBase conf 2024-12-13T00:56:18,402 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T00:56:18,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T00:56:18,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T00:56:19,601 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-13T00:56:19,751 INFO [Time-limited test {}] log.Log(170): Logging initialized @4083ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-13T00:56:19,915 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:20,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:20,114 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:20,115 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:20,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T00:56:20,158 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:20,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33a7d9f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:20,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71fea0d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:20,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@570847c1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-42245-hadoop-hdfs-3_4_1-tests_jar-_-any-16114885278748068751/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T00:56:20,476 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245} 2024-12-13T00:56:20,477 INFO [Time-limited test {}] server.Server(415): Started @4810ms 2024-12-13T00:56:21,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:21,062 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:21,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:21,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:21,065 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T00:56:21,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@669430a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:21,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f32c184{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:21,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@264b92dd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-46061-hadoop-hdfs-3_4_1-tests_jar-_-any-5342481088348962582/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T00:56:21,193 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061} 2024-12-13T00:56:21,194 INFO [Time-limited test {}] server.Server(415): Started @5527ms 2024-12-13T00:56:21,266 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T00:56:21,427 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:21,443 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:21,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:21,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:21,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T00:56:21,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26ed28d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:21,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5abba5e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:21,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30330659{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-42911-hadoop-hdfs-3_4_1-tests_jar-_-any-16853986530131898545/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T00:56:21,621 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911} 2024-12-13T00:56:21,621 INFO [Time-limited test {}] server.Server(415): Started @5955ms 2024-12-13T00:56:21,624 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T00:56:21,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:21,854 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:21,883 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:21,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:21,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T00:56:21,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@687e9e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:21,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1adb2d24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:21,968 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:21,972 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:21,979 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:21,988 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:22,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@167bbb68{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-39891-hadoop-hdfs-3_4_1-tests_jar-_-any-437334991426188168/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T00:56:22,069 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891} 2024-12-13T00:56:22,070 INFO [Time-limited test {}] server.Server(415): Started @6403ms 2024-12-13T00:56:22,072 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T00:56:22,140 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T00:56:22,141 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T00:56:22,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x794d9e18b63bd5bf with lease ID 0x296f8beeeb01e707: Processing first storage report for DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408 from datanode DatanodeRegistration(127.0.0.1:39849, datanodeUuid=11136e46-9b18-4895-bffb-608428d5bcef, infoPort=38485, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,243 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x794d9e18b63bd5bf with lease ID 0x296f8beeeb01e707: from storage DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408 node DatanodeRegistration(127.0.0.1:39849, datanodeUuid=11136e46-9b18-4895-bffb-608428d5bcef, infoPort=38485, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9eed2d6e0c8163 with lease ID 0x296f8beeeb01e708: Processing first storage report for DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1 from datanode DatanodeRegistration(127.0.0.1:42021, datanodeUuid=b510152c-04ee-4459-a0d2-f69dbaee7234, infoPort=46641, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9eed2d6e0c8163 with lease ID 0x296f8beeeb01e708: from storage DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1 node DatanodeRegistration(127.0.0.1:42021, datanodeUuid=b510152c-04ee-4459-a0d2-f69dbaee7234, infoPort=46641, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a9eed2d6e0c8163 with lease ID 0x296f8beeeb01e708: Processing first storage report for DS-62cacfa1-25a4-45a7-ab7e-d2c370e79ccc from datanode DatanodeRegistration(127.0.0.1:42021, datanodeUuid=b510152c-04ee-4459-a0d2-f69dbaee7234, infoPort=46641, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a9eed2d6e0c8163 with lease ID 0x296f8beeeb01e708: from storage DS-62cacfa1-25a4-45a7-ab7e-d2c370e79ccc node DatanodeRegistration(127.0.0.1:42021, datanodeUuid=b510152c-04ee-4459-a0d2-f69dbaee7234, infoPort=46641, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 
0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x794d9e18b63bd5bf with lease ID 0x296f8beeeb01e707: Processing first storage report for DS-9d2c202a-4d1a-4876-a1f3-c612480e7277 from datanode DatanodeRegistration(127.0.0.1:39849, datanodeUuid=11136e46-9b18-4895-bffb-608428d5bcef, infoPort=38485, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,246 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x794d9e18b63bd5bf with lease ID 0x296f8beeeb01e707: from storage DS-9d2c202a-4d1a-4876-a1f3-c612480e7277 node DatanodeRegistration(127.0.0.1:39849, datanodeUuid=11136e46-9b18-4895-bffb-608428d5bcef, infoPort=38485, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,358 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:22,366 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265/current, will proceed with Du for space computation calculation, 2024-12-13T00:56:22,478 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T00:56:22,486 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbe43cf21994aa07 with lease ID 0x296f8beeeb01e709: Processing first storage report for DS-f279c1e1-3811-445f-b8fa-15c0bb69032c from datanode DatanodeRegistration(127.0.0.1:40551, datanodeUuid=2ce12a41-d513-4135-a1ea-08b97fb0db53, infoPort=33569, infoSecurePort=0, ipcPort=41429, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbe43cf21994aa07 with lease ID 0x296f8beeeb01e709: from storage DS-f279c1e1-3811-445f-b8fa-15c0bb69032c node DatanodeRegistration(127.0.0.1:40551, datanodeUuid=2ce12a41-d513-4135-a1ea-08b97fb0db53, infoPort=33569, infoSecurePort=0, ipcPort=41429, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcbe43cf21994aa07 with lease ID 0x296f8beeeb01e709: Processing first storage report for DS-9aa0e88e-363d-4bad-a15d-5282d33fd834 from datanode DatanodeRegistration(127.0.0.1:40551, datanodeUuid=2ce12a41-d513-4135-a1ea-08b97fb0db53, infoPort=33569, infoSecurePort=0, ipcPort=41429, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265) 2024-12-13T00:56:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcbe43cf21994aa07 with lease ID 0x296f8beeeb01e709: from storage DS-9aa0e88e-363d-4bad-a15d-5282d33fd834 node DatanodeRegistration(127.0.0.1:40551, datanodeUuid=2ce12a41-d513-4135-a1ea-08b97fb0db53, infoPort=33569, infoSecurePort=0, ipcPort=41429, storageInfo=lv=-57;cid=testClusterID;nsid=891089685;c=1734051379265), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T00:56:22,765 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e 2024-12-13T00:56:22,887 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/zookeeper_0, clientPort=49424, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T00:56:22,905 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=49424 2024-12-13T00:56:22,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
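Everything from 00:56:18,285 onward is HBaseTestingUtility bringing up the minicluster described by StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}: DFS first, then the three datanodes, then MiniZooKeeperCluster on client port 49424. A rough sketch of the kind of harness call that produces this startup sequence, assuming the branch-2 HBaseTestingUtility and StartMiniClusterOption builder API; the option values come from the log and the rest is illustrative, not the actual TestExportSnapshot source:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // starts DFS, ZooKeeper and HBase, as logged above
        try {
            // ... exercise snapshot export against util.getConnection() ...
        } finally {
            util.shutdownMiniCluster();
        }
    }
}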
2024-12-13T00:56:22,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:23,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741825_1001 (size=7) 2024-12-13T00:56:23,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741825_1001 (size=7) 2024-12-13T00:56:23,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741825_1001 (size=7) 2024-12-13T00:56:23,681 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee with version=8 2024-12-13T00:56:23,681 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/hbase-staging 2024-12-13T00:56:23,830 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-13T00:56:24,209 INFO [Time-limited test {}] client.ConnectionUtils(129): master/9812beb1e4cd:0 server-side Connection retries=45 2024-12-13T00:56:24,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,242 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T00:56:24,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T00:56:24,450 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T00:56:24,535 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-13T00:56:24,548 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-13T00:56:24,553 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T00:56:24,594 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18877 (auto-detected) 2024-12-13T00:56:24,595 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 
02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-13T00:56:24,618 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38293 2024-12-13T00:56:24,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,651 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38293 connecting to ZooKeeper ensemble=127.0.0.1:49424 2024-12-13T00:56:24,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382930x0, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T00:56:24,698 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38293-0x10088dfff310000 connected 2024-12-13T00:56:24,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T00:56:24,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T00:56:24,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T00:56:24,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38293 2024-12-13T00:56:24,778 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38293 2024-12-13T00:56:24,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38293 2024-12-13T00:56:24,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38293 2024-12-13T00:56:24,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38293 2024-12-13T00:56:24,796 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee, hbase.cluster.distributed=false 2024-12-13T00:56:24,876 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9812beb1e4cd:0 server-side Connection retries=45 2024-12-13T00:56:24,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,877 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-12-13T00:56:24,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T00:56:24,880 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T00:56:24,883 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T00:56:24,884 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46075 2024-12-13T00:56:24,886 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T00:56:24,900 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T00:56:24,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,915 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46075 connecting to ZooKeeper ensemble=127.0.0.1:49424 2024-12-13T00:56:24,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460750x0, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T00:56:24,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460750x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T00:56:24,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46075-0x10088dfff310001 connected 2024-12-13T00:56:24,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T00:56:24,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T00:56:24,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46075 2024-12-13T00:56:24,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46075 2024-12-13T00:56:24,941 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46075 2024-12-13T00:56:24,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46075 
2024-12-13T00:56:24,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46075 2024-12-13T00:56:24,969 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9812beb1e4cd:0 server-side Connection retries=45 2024-12-13T00:56:24,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,971 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,972 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T00:56:24,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:24,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T00:56:24,972 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T00:56:24,973 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T00:56:24,976 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45231 2024-12-13T00:56:24,977 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T00:56:24,981 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T00:56:24,983 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:24,994 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45231 connecting to ZooKeeper ensemble=127.0.0.1:49424 2024-12-13T00:56:25,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452310x0, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T00:56:25,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452310x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T00:56:25,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452310x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T00:56:25,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452310x0, 
quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T00:56:25,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45231-0x10088dfff310002 connected 2024-12-13T00:56:25,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45231 2024-12-13T00:56:25,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45231 2024-12-13T00:56:25,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45231 2024-12-13T00:56:25,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45231 2024-12-13T00:56:25,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45231 2024-12-13T00:56:25,056 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/9812beb1e4cd:0 server-side Connection retries=45 2024-12-13T00:56:25,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:25,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:25,057 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T00:56:25,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T00:56:25,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T00:56:25,057 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T00:56:25,058 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T00:56:25,059 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40687 2024-12-13T00:56:25,060 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T00:56:25,072 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T00:56:25,076 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:25,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
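The repeated ZKUtil(113) "Set watcher on znode that does not yet exist" lines are existence watches: in ZooKeeper an exists() call registers the watch whether or not the node is present, which is how each process here learns when the active master later creates /hbase/master (the NodeCreated events at 00:56:25,168). A minimal sketch of that pattern with the plain ZooKeeper client; the znode path and ensemble address come from the log, everything else is illustrative:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ExistenceWatchSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event: " + event.getType() + " on " + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49424", 30000, watcher);
        // exists() registers the watch even while /hbase/master is absent;
        // a NodeCreated event fires once the master creates the znode.
        zk.exists("/hbase/master", watcher);
    }
}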
2024-12-13T00:56:25,082 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40687 connecting to ZooKeeper ensemble=127.0.0.1:49424 2024-12-13T00:56:25,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406870x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T00:56:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406870x0, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T00:56:25,089 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406870x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T00:56:25,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406870x0, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T00:56:25,095 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40687-0x10088dfff310003 connected 2024-12-13T00:56:25,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40687 2024-12-13T00:56:25,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40687 2024-12-13T00:56:25,105 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40687 2024-12-13T00:56:25,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40687 2024-12-13T00:56:25,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40687 2024-12-13T00:56:25,115 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:25,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,130 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:25,137 DEBUG [M:0;9812beb1e4cd:38293 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9812beb1e4cd:38293 2024-12-13T00:56:25,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T00:56:25,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T00:56:25,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T00:56:25,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T00:56:25,170 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T00:56:25,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,172 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9812beb1e4cd,38293,1734051383822 from backup master directory 2024-12-13T00:56:25,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T00:56:25,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-13T00:56:25,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:25,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T00:56:25,179 WARN [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-13T00:56:25,179 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:25,182 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-13T00:56:25,184 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-13T00:56:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741826_1002 (size=42) 2024-12-13T00:56:25,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741826_1002 (size=42) 2024-12-13T00:56:25,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741826_1002 (size=42) 2024-12-13T00:56:25,308 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/hbase.id with ID: e2a3e6ba-bf74-49c8-bdb1-3106f0f89f79 2024-12-13T00:56:25,401 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T00:56:25,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:25,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to 
blk_1073741827_1003 (size=196)
2024-12-13T00:56:25,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741827_1003 (size=196)
2024-12-13T00:56:25,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741827_1003 (size=196)
2024-12-13T00:56:25,521 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T00:56:25,523 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-13T00:56:25,549 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T00:56:25,556 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-13T00:56:25,592 WARN [IPC Server handler 2 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-13T00:56:25,593 WARN [IPC Server handler 2 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-13T00:56:25,593 WARN [IPC Server handler 2 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-13T00:56:25,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741828_1004 (size=1189)
2024-12-13T00:56:25,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741828_1004 (size=1189)
2024-12-13T00:56:25,634 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE =>
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/data/master/store 2024-12-13T00:56:25,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741829_1005 (size=34) 2024-12-13T00:56:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741829_1005 (size=34) 2024-12-13T00:56:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741829_1005 (size=34) 2024-12-13T00:56:25,666 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-13T00:56:25,667 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:25,669 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T00:56:25,669 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T00:56:25,669 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T00:56:25,670 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T00:56:25,670 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T00:56:25,672 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-13T00:56:25,673 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T00:56:25,675 WARN [master/9812beb1e4cd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/data/master/store/.initializing 2024-12-13T00:56:25,675 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:25,683 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T00:56:25,699 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9812beb1e4cd%2C38293%2C1734051383822, suffix=, logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822, archiveDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/oldWALs, maxLogs=10 2024-12-13T00:56:25,726 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705, exclude list is [], retry=0 2024-12-13T00:56:25,731 WARN [IPC Server handler 1 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:25,732 WARN [IPC Server handler 1 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:25,732 WARN [IPC Server handler 1 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:25,749 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39849,DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408,DISK] 2024-12-13T00:56:25,752 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-13T00:56:25,756 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42021,DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1,DISK] 2024-12-13T00:56:25,803 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 2024-12-13T00:56:25,804 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:46641:46641)] 2024-12-13T00:56:25,804 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T00:56:25,805 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:25,809 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,811 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T00:56:25,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:25,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:25,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T00:56:25,918 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:25,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:25,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T00:56:25,923 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:25,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:25,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,927 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T00:56:25,927 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:25,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:25,960 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,962 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:25,981 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-13T00:56:25,996 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T00:56:26,003 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:26,005 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59136758, jitterRate=-0.11879363656044006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-13T00:56:26,011 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T00:56:26,012 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-13T00:56:26,047 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e52c00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:26,095 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-13T00:56:26,111 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-13T00:56:26,112 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-13T00:56:26,115 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-13T00:56:26,116 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-13T00:56:26,123 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-12-13T00:56:26,123 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-13T00:56:26,156 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-13T00:56:26,169 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-13T00:56:26,172 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-13T00:56:26,176 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-13T00:56:26,177 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-13T00:56:26,180 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-13T00:56:26,183 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-13T00:56:26,188 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-13T00:56:26,190 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-13T00:56:26,191 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-13T00:56:26,193 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-13T00:56:26,207 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-13T00:56:26,209 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-13T00:56:26,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T00:56:26,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,217 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=9812beb1e4cd,38293,1734051383822, sessionid=0x10088dfff310000, setting cluster-up flag (Was=false) 2024-12-13T00:56:26,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,245 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-13T00:56:26,248 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:26,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:26,263 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-13T00:56:26,265 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:26,346 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9812beb1e4cd:45231 2024-12-13T00:56:26,350 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1008): ClusterId : e2a3e6ba-bf74-49c8-bdb1-3106f0f89f79 2024-12-13T00:56:26,351 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9812beb1e4cd:40687 2024-12-13T00:56:26,353 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1008): ClusterId : e2a3e6ba-bf74-49c8-bdb1-3106f0f89f79 2024-12-13T00:56:26,355 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9812beb1e4cd:46075 2024-12-13T00:56:26,356 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T00:56:26,356 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T00:56:26,359 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1008): ClusterId : e2a3e6ba-bf74-49c8-bdb1-3106f0f89f79 2024-12-13T00:56:26,359 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T00:56:26,365 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T00:56:26,365 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T00:56:26,365 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T00:56:26,365 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T00:56:26,368 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T00:56:26,369 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T00:56:26,373 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T00:56:26,373 DEBUG [RS:1;9812beb1e4cd:45231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67a59aed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:26,376 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T00:56:26,376 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot 
initialized 2024-12-13T00:56:26,376 DEBUG [RS:2;9812beb1e4cd:40687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f4655bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:26,377 DEBUG [RS:0;9812beb1e4cd:46075 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a72998c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:26,380 DEBUG [RS:1;9812beb1e4cd:45231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a90c40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9812beb1e4cd/172.17.0.2:0 2024-12-13T00:56:26,385 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T00:56:26,386 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T00:56:26,390 DEBUG [RS:2;9812beb1e4cd:40687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23ee7b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9812beb1e4cd/172.17.0.2:0 2024-12-13T00:56:26,391 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T00:56:26,391 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T00:56:26,400 DEBUG [RS:0;9812beb1e4cd:46075 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5963ba16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9812beb1e4cd/172.17.0.2:0 2024-12-13T00:56:26,401 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T00:56:26,401 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T00:56:26,448 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T00:56:26,448 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T00:56:26,448 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-13T00:56:26,448 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-13T00:56:26,449 INFO [RS:1;9812beb1e4cd:45231 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:56:26,449 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-13T00:56:26,449 INFO [RS:2;9812beb1e4cd:40687 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:56:26,449 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-13T00:56:26,450 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:56:26,450 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-13T00:56:26,453 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:45231, startcode=1734051384968 2024-12-13T00:56:26,453 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:40687, startcode=1734051385054 2024-12-13T00:56:26,464 INFO [RS:0;9812beb1e4cd:46075 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:56:26,464 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-13T00:56:26,466 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:46075, startcode=1734051384875 2024-12-13T00:56:26,470 DEBUG [RS:2;9812beb1e4cd:40687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:56:26,471 DEBUG [RS:1;9812beb1e4cd:45231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:56:26,474 DEBUG [RS:0;9812beb1e4cd:46075 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:56:26,533 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-13T00:56:26,540 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37865, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T00:56:26,542 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56647, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T00:56:26,545 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-13T00:56:26,548 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60539, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T00:56:26,549 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at 
org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,550 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-13T00:56:26,558 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9812beb1e4cd,38293,1734051383822 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-13T00:56:26,561 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,562 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,565 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9812beb1e4cd:0, corePoolSize=5, maxPoolSize=5 2024-12-13T00:56:26,565 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9812beb1e4cd:0, corePoolSize=5, maxPoolSize=5 2024-12-13T00:56:26,565 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9812beb1e4cd:0, corePoolSize=5, maxPoolSize=5 2024-12-13T00:56:26,565 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9812beb1e4cd:0, corePoolSize=5, maxPoolSize=5 2024-12-13T00:56:26,566 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9812beb1e4cd:0, corePoolSize=10, maxPoolSize=10 2024-12-13T00:56:26,566 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:26,566 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9812beb1e4cd:0, corePoolSize=2, maxPoolSize=2 2024-12-13T00:56:26,566 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:26,593 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,594 WARN [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-13T00:56:26,594 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,594 WARN [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-13T00:56:26,598 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,598 WARN [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
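The entries above show the registration handshake at startup: each regionserver calls reportForDuty against the master, the master's RPC layer rejects the call with ServerNotRunningYetException because initialization is not finished, and the regionserver sleeps (100 ms here, 200 ms on the next round) before trying again. The Java sketch below only illustrates that register-with-backoff loop; the class, interface and method names are hypothetical, and the exact backoff schedule HBase uses may differ.

    import java.util.concurrent.TimeUnit;

    // Illustrative sketch: retry with a growing, capped sleep, mirroring the
    // "reportForDuty failed; sleeping N ms and then retrying" messages above.
    public final class RegisterWithMaster {

      /** Hypothetical callback representing one registration attempt. */
      interface Attempt {
        boolean tryRegister() throws Exception; // true once the master accepts us
      }

      static void registerWithBackoff(Attempt attempt, long initialSleepMs, long maxSleepMs)
          throws InterruptedException {
        long sleepMs = initialSleepMs;
        while (true) {
          try {
            if (attempt.tryRegister()) {
              return; // registered
            }
          } catch (Exception e) {
            // e.g. master not running yet; fall through and retry
          }
          TimeUnit.MILLISECONDS.sleep(sleepMs);
          sleepMs = Math.min(sleepMs * 2, maxSleepMs); // 100 ms -> 200 ms -> ... capped
        }
      }
    }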
2024-12-13T00:56:26,605 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T00:56:26,606 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-13T00:56:26,624 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:26,624 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T00:56:26,645 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734051416644 2024-12-13T00:56:26,647 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-13T00:56:26,649 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-13T00:56:26,653 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-13T00:56:26,654 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-13T00:56:26,654 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-13T00:56:26,654 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-13T00:56:26,663 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
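The InitMetaProcedure entry above prints the full hbase:meta table descriptor the master writes out: families info, rep_barrier and table, all with ROW_INDEX_V1 block encoding, ROWCOL bloom filters and in-memory block caching. The meta descriptor itself is built internally by FSTableDescriptors; the sketch below only shows how a descriptor with similar settings could be expressed through the public HBase 2.x client builders, and the table name used is a stand-in, not hbase:meta.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeDescriptor {
      // A column family configured like the 'info' family logged above.
      static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_meta_like")) // stand-in table name
            .setColumnFamily(info)
            .build();
      }
    }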
2024-12-13T00:56:26,681 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-13T00:56:26,683 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-13T00:56:26,683 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-13T00:56:26,695 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:46075, startcode=1734051384875 2024-12-13T00:56:26,695 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:40687, startcode=1734051385054 2024-12-13T00:56:26,697 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,698 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,698 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,698 WARN [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 2024-12-13T00:56:26,699 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,700 WARN [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 
2024-12-13T00:56:26,700 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:45231, startcode=1734051384968 2024-12-13T00:56:26,701 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-13T00:56:26,701 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-13T00:56:26,702 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-13T00:56:26,703 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-13T00:56:26,703 WARN [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 200 ms and then retrying. 2024-12-13T00:56:26,719 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.large.0-1734051386703,5,FailOnTimeoutGroup] 2024-12-13T00:56:26,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741831_1007 (size=1039) 2024-12-13T00:56:26,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741831_1007 (size=1039) 2024-12-13T00:56:26,744 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.small.0-1734051386719,5,FailOnTimeoutGroup] 2024-12-13T00:56:26,744 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:26,744 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-13T00:56:26,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741831_1007 (size=1039) 2024-12-13T00:56:26,746 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-13T00:56:26,747 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:26,749 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-13T00:56:26,750 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:26,768 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:26,769 WARN [IPC Server handler 3 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:26,769 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:26,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741832_1008 (size=32) 2024-12-13T00:56:26,794 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741832_1008 (size=32) 2024-12-13T00:56:26,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:26,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T00:56:26,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T00:56:26,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:26,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:26,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T00:56:26,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T00:56:26,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:26,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:26,825 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T00:56:26,828 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T00:56:26,828 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:26,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:26,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740 2024-12-13T00:56:26,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740 2024-12-13T00:56:26,839 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
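The CompactionConfiguration lines above, repeated once per column family of region 1588230740, report the per-store compaction parameters in effect: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2 and off-peak ratio 5.0. These values come from configuration; the snippet below is a small example of the standard keys behind them. The values shown match what the log already reports, so setting them explicitly is only needed to change the defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuning {
      static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Size ratio used by the exploring compaction policy, plus its off-peak variant.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
      }
    }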
2024-12-13T00:56:26,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T00:56:26,849 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:26,851 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61230546, jitterRate=-0.08759376406669617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-13T00:56:26,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T00:56:26,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T00:56:26,854 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T00:56:26,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T00:56:26,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T00:56:26,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T00:56:26,856 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T00:56:26,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T00:56:26,861 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T00:56:26,862 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-13T00:56:26,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-13T00:56:26,898 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-13T00:56:26,899 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:26,899 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:26,899 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: 
expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:26,899 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:26,900 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:46075, startcode=1734051384875 2024-12-13T00:56:26,900 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:26,900 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:26,900 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:26,900 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:26,902 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3073): reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:40687, startcode=1734051385054 2024-12-13T00:56:26,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:26,905 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(486): Registering regionserver=9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:26,910 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3073): 
reportForDuty to master=9812beb1e4cd,38293,1734051383822 with isa=9812beb1e4cd/172.17.0.2:45231, startcode=1734051384968 2024-12-13T00:56:26,912 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-13T00:56:26,927 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:26,927 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46775 2024-12-13T00:56:26,927 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T00:56:26,927 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:26,930 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(486): Registering regionserver=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:26,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:26,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T00:56:26,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293 {}] master.ServerManager(486): Registering regionserver=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:26,939 DEBUG [RS:0;9812beb1e4cd:46075 {}] zookeeper.ZKUtil(111): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:26,939 WARN [RS:0;9812beb1e4cd:46075 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-13T00:56:26,939 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:26,940 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46775 2024-12-13T00:56:26,940 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T00:56:26,940 INFO [RS:0;9812beb1e4cd:46075 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T00:56:26,940 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:26,943 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:26,943 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46775 2024-12-13T00:56:26,943 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T00:56:26,946 DEBUG [RS:2;9812beb1e4cd:40687 {}] zookeeper.ZKUtil(111): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:26,947 WARN [RS:2;9812beb1e4cd:40687 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-13T00:56:26,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T00:56:26,947 INFO [RS:2;9812beb1e4cd:40687 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T00:56:26,947 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:26,947 DEBUG [RS:1;9812beb1e4cd:45231 {}] zookeeper.ZKUtil(111): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:26,948 WARN [RS:1;9812beb1e4cd:45231 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
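The ZKUtil and ZNodeClearer entries around here show each regionserver publishing itself under /hbase/rs in ZooKeeper; the master's RegionServerTracker watches that path, so an ephemeral child appearing means a server joined and one vanishing means its session (and most likely the process) died. The sketch below shows the underlying ephemeral-node pattern with the plain ZooKeeper client; the quorum address is the one from the log, the server name is made up, and this is not HBase's ZKUtil code.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class EphemeralRegistration {
      public static void main(String[] args) throws Exception {
        // Quorum as reported above; the session timeout is illustrative.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49424", 30_000, event -> { });
        // An ephemeral child under /hbase/rs disappears automatically when the
        // owning session ends, which is how the master notices a crashed server.
        String path = "/hbase/rs/example-host,12345," + System.currentTimeMillis();
        zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // ... keep the session alive for as long as the server is up ...
        zk.close();
      }
    }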
2024-12-13T00:56:26,948 INFO [RS:1;9812beb1e4cd:45231 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T00:56:26,948 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:26,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9812beb1e4cd,40687,1734051385054] 2024-12-13T00:56:26,952 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9812beb1e4cd,46075,1734051384875] 2024-12-13T00:56:26,953 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9812beb1e4cd,45231,1734051384968] 2024-12-13T00:56:26,996 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T00:56:26,996 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T00:56:27,000 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T00:56:27,015 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T00:56:27,015 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T00:56:27,015 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T00:56:27,034 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T00:56:27,034 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T00:56:27,042 INFO [RS:0;9812beb1e4cd:46075 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T00:56:27,043 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,044 INFO [RS:1;9812beb1e4cd:45231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T00:56:27,044 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,044 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T00:56:27,046 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T00:56:27,056 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
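The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. Both figures derive from heap-fraction settings; the snippet below shows the usual keys as an example (the values are the stock defaults, which are consistent with the numbers in this log, not settings read from this test run).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreSizing {
      static Configuration sized() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the regionserver heap available to all memstores combined.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit above (0.95 of 880 M is 836 M).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }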
2024-12-13T00:56:27,056 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,057 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,057 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,057 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,057 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,058 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9812beb1e4cd:0, corePoolSize=2, maxPoolSize=2 2024-12-13T00:56:27,058 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,058 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,058 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,058 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,059 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,059 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,059 DEBUG [RS:0;9812beb1e4cd:46075 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,063 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T00:56:27,063 WARN [9812beb1e4cd:38293 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-13T00:56:27,065 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-13T00:56:27,065 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9812beb1e4cd:0, corePoolSize=2, maxPoolSize=2 2024-12-13T00:56:27,066 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,067 DEBUG [RS:1;9812beb1e4cd:45231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,079 INFO [RS:2;9812beb1e4cd:40687 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T00:56:27,079 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,093 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,094 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,094 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-13T00:56:27,094 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,094 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,46075,1734051384875-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T00:56:27,108 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T00:56:27,108 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,108 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,109 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,109 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,109 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,45231,1734051384968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T00:56:27,111 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,111 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,112 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,112 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,112 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9812beb1e4cd:0, corePoolSize=2, maxPoolSize=2 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,113 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,114 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9812beb1e4cd:0, corePoolSize=1, maxPoolSize=1 2024-12-13T00:56:27,115 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,115 DEBUG [RS:2;9812beb1e4cd:40687 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9812beb1e4cd:0, corePoolSize=3, maxPoolSize=3 2024-12-13T00:56:27,128 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,129 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,129 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,129 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,129 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,40687,1734051385054-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T00:56:27,143 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T00:56:27,143 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T00:56:27,146 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,45231,1734051384968-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,146 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,46075,1734051384875-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:27,154 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T00:56:27,154 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,40687,1734051385054-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
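The ChoreService entries in this stretch show every server scheduling its periodic background work: CompactionChecker and MemstoreFlusherChore each second, nonceCleaner every six minutes, BrokenStoreFileCleaner every six hours, the MobFileCleanerChore daily, and a HeapMemoryTunerChore each minute. HBase implements these with its own ChoreService/ScheduledChore classes; the sketch below just shows the equivalent periodic-task pattern with the JDK scheduler, with made-up task bodies.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class ChoreLikeScheduler {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Roughly what "ScheduledChore name=CompactionChecker, period=1000 ms" amounts to.
        pool.scheduleAtFixedRate(
            () -> System.out.println("check stores for compaction work"),
            1_000, 1_000, TimeUnit.MILLISECONDS);
        // A slower chore, comparable to nonceCleaner with its 360000 ms period.
        pool.scheduleAtFixedRate(
            () -> System.out.println("expire old nonces"),
            360_000, 360_000, TimeUnit.MILLISECONDS);
      }
    }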
2024-12-13T00:56:27,176 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.Replication(204): 9812beb1e4cd,45231,1734051384968 started 2024-12-13T00:56:27,176 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1767): Serving as 9812beb1e4cd,45231,1734051384968, RpcServer on 9812beb1e4cd/172.17.0.2:45231, sessionid=0x10088dfff310002 2024-12-13T00:56:27,177 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T00:56:27,177 DEBUG [RS:1;9812beb1e4cd:45231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:27,177 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,45231,1734051384968' 2024-12-13T00:56:27,177 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T00:56:27,178 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T00:56:27,178 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T00:56:27,179 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.Replication(204): 9812beb1e4cd,46075,1734051384875 started 2024-12-13T00:56:27,179 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T00:56:27,179 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1767): Serving as 9812beb1e4cd,46075,1734051384875, RpcServer on 9812beb1e4cd/172.17.0.2:46075, sessionid=0x10088dfff310001 2024-12-13T00:56:27,179 DEBUG [RS:1;9812beb1e4cd:45231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:27,179 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,45231,1734051384968' 2024-12-13T00:56:27,179 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.Replication(204): 9812beb1e4cd,40687,1734051385054 started 2024-12-13T00:56:27,179 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T00:56:27,179 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T00:56:27,179 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1767): Serving as 9812beb1e4cd,40687,1734051385054, RpcServer on 9812beb1e4cd/172.17.0.2:40687, sessionid=0x10088dfff310003 2024-12-13T00:56:27,179 DEBUG [RS:0;9812beb1e4cd:46075 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:27,179 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,46075,1734051384875' 2024-12-13T00:56:27,179 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T00:56:27,179 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T00:56:27,179 DEBUG [RS:2;9812beb1e4cd:40687 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush 
procedure manager 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:27,179 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,40687,1734051385054' 2024-12-13T00:56:27,180 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T00:56:27,180 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T00:56:27,180 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T00:56:27,180 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T00:56:27,181 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T00:56:27,181 DEBUG [RS:1;9812beb1e4cd:45231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T00:56:27,181 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T00:56:27,181 DEBUG [RS:0;9812beb1e4cd:46075 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9812beb1e4cd,46075,1734051384875 2024-12-13T00:56:27,181 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T00:56:27,181 INFO [RS:1;9812beb1e4cd:45231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T00:56:27,181 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,46075,1734051384875' 2024-12-13T00:56:27,181 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T00:56:27,181 INFO [RS:1;9812beb1e4cd:45231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-13T00:56:27,181 DEBUG [RS:2;9812beb1e4cd:40687 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:27,181 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T00:56:27,181 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9812beb1e4cd,40687,1734051385054' 2024-12-13T00:56:27,181 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T00:56:27,182 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T00:56:27,182 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T00:56:27,182 DEBUG [RS:0;9812beb1e4cd:46075 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T00:56:27,182 DEBUG [RS:2;9812beb1e4cd:40687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T00:56:27,182 INFO [RS:0;9812beb1e4cd:46075 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T00:56:27,182 INFO [RS:2;9812beb1e4cd:40687 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T00:56:27,182 INFO [RS:0;9812beb1e4cd:46075 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-13T00:56:27,182 INFO [RS:2;9812beb1e4cd:40687 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-13T00:56:27,289 INFO [RS:0;9812beb1e4cd:46075 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T00:56:27,289 INFO [RS:2;9812beb1e4cd:40687 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T00:56:27,289 INFO [RS:1;9812beb1e4cd:45231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-13T00:56:27,294 INFO [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9812beb1e4cd%2C40687%2C1734051385054, suffix=, logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054, archiveDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs, maxLogs=32 2024-12-13T00:56:27,294 INFO [RS:0;9812beb1e4cd:46075 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9812beb1e4cd%2C46075%2C1734051384875, suffix=, logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,46075,1734051384875, archiveDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs, maxLogs=32 2024-12-13T00:56:27,295 INFO [RS:1;9812beb1e4cd:45231 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9812beb1e4cd%2C45231%2C1734051384968, suffix=, logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,45231,1734051384968, archiveDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs, maxLogs=32 2024-12-13T00:56:27,321 DEBUG [RS:0;9812beb1e4cd:46075 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,46075,1734051384875/9812beb1e4cd%2C46075%2C1734051384875.1734051387298, exclude list is [], retry=0 2024-12-13T00:56:27,321 DEBUG [RS:1;9812beb1e4cd:45231 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,45231,1734051384968/9812beb1e4cd%2C45231%2C1734051384968.1734051387299, exclude list is [], retry=0 2024-12-13T00:56:27,324 WARN [IPC Server handler 1 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:27,324 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:27,324 WARN [IPC Server handler 3 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be 
selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:27,324 WARN [IPC Server handler 1 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:27,324 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:27,324 WARN [IPC Server handler 1 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:27,327 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42021,DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1,DISK] 2024-12-13T00:56:27,328 DEBUG [RS:2;9812beb1e4cd:40687 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054/9812beb1e4cd%2C40687%2C1734051385054.1734051387299, exclude list is [], retry=0 2024-12-13T00:56:27,328 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39849,DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408,DISK] 2024-12-13T00:56:27,329 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39849,DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408,DISK] 2024-12-13T00:56:27,329 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42021,DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1,DISK] 2024-12-13T00:56:27,331 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:27,332 WARN [IPC Server handler 3 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:27,332 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:27,335 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39849,DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408,DISK] 2024-12-13T00:56:27,336 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42021,DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1,DISK] 2024-12-13T00:56:27,337 INFO [RS:0;9812beb1e4cd:46075 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,46075,1734051384875/9812beb1e4cd%2C46075%2C1734051384875.1734051387298 2024-12-13T00:56:27,340 DEBUG [RS:0;9812beb1e4cd:46075 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:46641:46641)] 2024-12-13T00:56:27,342 INFO [RS:1;9812beb1e4cd:45231 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,45231,1734051384968/9812beb1e4cd%2C45231%2C1734051384968.1734051387299 2024-12-13T00:56:27,342 DEBUG [RS:1;9812beb1e4cd:45231 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46641:46641),(127.0.0.1/127.0.0.1:38485:38485)] 2024-12-13T00:56:27,376 INFO [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054/9812beb1e4cd%2C40687%2C1734051385054.1734051387299 2024-12-13T00:56:27,377 DEBUG [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38485:38485),(127.0.0.1/127.0.0.1:46641:46641)] 2024-12-13T00:56:27,565 DEBUG [9812beb1e4cd:38293 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-13T00:56:27,568 DEBUG [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:56:27,575 DEBUG [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:56:27,575 DEBUG [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:56:27,575 DEBUG [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(303): server 2 
is on host 0 2024-12-13T00:56:27,575 INFO [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:56:27,575 INFO [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:56:27,575 INFO [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:56:27,575 DEBUG [9812beb1e4cd:38293 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:56:27,580 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:27,586 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9812beb1e4cd,40687,1734051385054, state=OPENING 2024-12-13T00:56:27,591 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-13T00:56:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:27,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:27,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,594 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:56:27,776 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:27,778 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:56:27,780 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:56:27,797 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 
2024-12-13T00:56:27,797 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-13T00:56:27,798 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-13T00:56:27,802 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9812beb1e4cd%2C40687%2C1734051385054.meta, suffix=.meta, logDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054, archiveDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs, maxLogs=32 2024-12-13T00:56:27,827 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054/9812beb1e4cd%2C40687%2C1734051385054.meta.1734051387804.meta, exclude list is [], retry=0 2024-12-13T00:56:27,832 WARN [IPC Server handler 0 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:27,832 WARN [IPC Server handler 0 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:27,832 WARN [IPC Server handler 0 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:27,835 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42021,DS-1f7dacd0-1c22-4b88-8711-8d3d7164bce1,DISK] 2024-12-13T00:56:27,835 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39849,DS-09ec43bf-5a6b-4a4f-ba78-e2fdd8819408,DISK] 2024-12-13T00:56:27,841 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/WALs/9812beb1e4cd,40687,1734051385054/9812beb1e4cd%2C40687%2C1734051385054.meta.1734051387804.meta 2024-12-13T00:56:27,843 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46641:46641),(127.0.0.1/127.0.0.1:38485:38485)] 2024-12-13T00:56:27,843 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-13T00:56:27,844 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-13T00:56:27,846 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:56:27,846 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-13T00:56:27,848 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-13T00:56:27,850 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-13T00:56:27,861 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-13T00:56:27,861 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:27,862 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-13T00:56:27,862 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-13T00:56:27,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T00:56:27,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T00:56:27,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:27,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:27,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T00:56:27,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T00:56:27,871 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:27,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:27,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T00:56:27,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T00:56:27,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:27,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T00:56:27,880 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740 2024-12-13T00:56:27,883 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740 2024-12-13T00:56:27,886 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-13T00:56:27,890 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T00:56:27,892 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74294951, jitterRate=0.10708104074001312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-13T00:56:27,897 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T00:56:27,908 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734051387769 2024-12-13T00:56:27,924 DEBUG [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-13T00:56:27,924 INFO [RS_OPEN_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-13T00:56:27,925 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:27,928 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9812beb1e4cd,40687,1734051385054, state=OPEN 2024-12-13T00:56:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T00:56:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T00:56:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T00:56:27,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T00:56:27,932 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,932 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,932 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,932 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T00:56:27,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished 
subprocedure pid=3, resume processing ppid=2 2024-12-13T00:56:27,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=9812beb1e4cd,40687,1734051385054 in 335 msec 2024-12-13T00:56:27,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-13T00:56:27,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0640 sec 2024-12-13T00:56:27,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4970 sec 2024-12-13T00:56:27,956 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734051387956, completionTime=-1 2024-12-13T00:56:27,956 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-13T00:56:27,956 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-13T00:56:28,007 DEBUG [hconnection-0x71b2f156-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:28,010 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56680, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:28,031 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-13T00:56:28,031 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734051448031 2024-12-13T00:56:28,032 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734051508032 2024-12-13T00:56:28,032 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 75 msec 2024-12-13T00:56:28,058 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:56:28,069 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:28,070 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:28,070 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-13T00:56:28,072 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9812beb1e4cd:38293, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:28,072 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-13T00:56:28,080 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-13T00:56:28,083 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-13T00:56:28,084 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T00:56:28,091 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-13T00:56:28,094 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:56:28,095 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:28,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:56:28,105 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:28,105 WARN [IPC Server handler 3 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:28,105 WARN [IPC Server handler 3 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:28,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741837_1013 
(size=358) 2024-12-13T00:56:28,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741837_1013 (size=358) 2024-12-13T00:56:28,116 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c77b7057609be1e5abde48e096a7d0b7, NAME => 'hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:28,121 WARN [IPC Server handler 2 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T00:56:28,121 WARN [IPC Server handler 2 on default port 46775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T00:56:28,121 WARN [IPC Server handler 2 on default port 46775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T00:56:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741838_1014 (size=42) 2024-12-13T00:56:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741838_1014 (size=42) 2024-12-13T00:56:28,131 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:28,131 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing c77b7057609be1e5abde48e096a7d0b7, disabling compactions & flushes 2024-12-13T00:56:28,132 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 
2024-12-13T00:56:28,132 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T00:56:28,132 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. after waiting 0 ms 2024-12-13T00:56:28,132 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T00:56:28,132 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T00:56:28,132 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for c77b7057609be1e5abde48e096a7d0b7: 2024-12-13T00:56:28,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:56:28,144 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734051388136"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051388136"}]},"ts":"1734051388136"} 2024-12-13T00:56:28,179 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T00:56:28,181 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:56:28,185 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051388181"}]},"ts":"1734051388181"} 2024-12-13T00:56:28,190 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-13T00:56:28,195 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:56:28,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:56:28,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:56:28,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:56:28,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:56:28,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:56:28,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:56:28,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:56:28,198 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c77b7057609be1e5abde48e096a7d0b7, ASSIGN}] 2024-12-13T00:56:28,201 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c77b7057609be1e5abde48e096a7d0b7, ASSIGN 2024-12-13T00:56:28,203 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=c77b7057609be1e5abde48e096a7d0b7, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:56:28,354 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-13T00:56:28,355 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c77b7057609be1e5abde48e096a7d0b7, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:28,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure c77b7057609be1e5abde48e096a7d0b7, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:56:28,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:28,515 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:56:28,519 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:56:28,527 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T00:56:28,527 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => c77b7057609be1e5abde48e096a7d0b7, NAME => 'hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.', STARTKEY => '', ENDKEY => ''} 2024-12-13T00:56:28,528 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. service=AccessControlService 2024-12-13T00:56:28,529 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:56:28,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:28,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,536 INFO [StoreOpener-c77b7057609be1e5abde48e096a7d0b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,542 INFO [StoreOpener-c77b7057609be1e5abde48e096a7d0b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c77b7057609be1e5abde48e096a7d0b7 columnFamilyName info 2024-12-13T00:56:28,542 DEBUG [StoreOpener-c77b7057609be1e5abde48e096a7d0b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:28,543 INFO [StoreOpener-c77b7057609be1e5abde48e096a7d0b7-1 {}] regionserver.HStore(327): Store=c77b7057609be1e5abde48e096a7d0b7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:28,546 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,547 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,552 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for c77b7057609be1e5abde48e096a7d0b7 2024-12-13T00:56:28,558 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:28,559 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened c77b7057609be1e5abde48e096a7d0b7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59584849, jitterRate=-0.11211656033992767}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:56:28,560 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for c77b7057609be1e5abde48e096a7d0b7: 2024-12-13T00:56:28,572 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7., pid=6, masterSystemTime=1734051388515 2024-12-13T00:56:28,577 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T00:56:28,577 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 
2024-12-13T00:56:28,579 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c77b7057609be1e5abde48e096a7d0b7, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:28,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-13T00:56:28,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure c77b7057609be1e5abde48e096a7d0b7, server=9812beb1e4cd,45231,1734051384968 in 225 msec 2024-12-13T00:56:28,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-13T00:56:28,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=c77b7057609be1e5abde48e096a7d0b7, ASSIGN in 392 msec 2024-12-13T00:56:28,598 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:56:28,599 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051388598"}]},"ts":"1734051388598"} 2024-12-13T00:56:28,602 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-13T00:56:28,606 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:56:28,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 521 msec 2024-12-13T00:56:28,695 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-13T00:56:28,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:28,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:28,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:28,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-13T00:56:28,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:28,733 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:28,747 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:28,759 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-13T00:56:28,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T00:56:28,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 31 msec 2024-12-13T00:56:28,801 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-13T00:56:28,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T00:56:28,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 21 msec 2024-12-13T00:56:28,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-13T00:56:28,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-13T00:56:28,921 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.742sec 2024-12-13T00:56:28,939 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-13T00:56:28,941 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-13T00:56:28,943 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-13T00:56:28,944 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-13T00:56:28,944 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-13T00:56:28,946 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-13T00:56:28,946 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-13T00:56:28,991 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5634e099 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b7138e1 2024-12-13T00:56:28,997 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T00:56:29,001 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-13T00:56:29,003 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:56:29,003 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:29,006 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-13T00:56:29,006 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:56:29,016 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T00:56:29,018 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-13T00:56:29,106 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3470f3ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:29,119 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T00:56:29,129 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-13T00:56:29,129 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-13T00:56:29,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741839_1015 (size=349) 2024-12-13T00:56:29,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741839_1015 (size=349) 2024-12-13T00:56:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741839_1015 (size=349) 2024-12-13T00:56:29,230 DEBUG [hconnection-0x375b9881-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:29,238 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f365b65dae95a0516608c8eeb7b680b7, NAME => 'hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:29,303 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:29,307 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=9812beb1e4cd,38293,1734051383822 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/test.cache.data in system properties and HBase conf 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir in system properties and HBase conf 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T00:56:29,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T00:56:29,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/nfs.dump.dir in system properties and HBase conf 2024-12-13T00:56:29,310 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir in system properties and HBase conf 2024-12-13T00:56:29,310 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T00:56:29,310 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T00:56:29,310 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T00:56:29,320 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T00:56:29,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741840_1016 (size=36) 2024-12-13T00:56:29,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741840_1016 (size=36) 2024-12-13T00:56:29,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741840_1016 (size=36) 2024-12-13T00:56:29,373 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 
{}] regionserver.HRegion(894): Instantiated hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:29,374 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing f365b65dae95a0516608c8eeb7b680b7, disabling compactions & flushes 2024-12-13T00:56:29,374 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,374 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,374 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. after waiting 0 ms 2024-12-13T00:56:29,374 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,374 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,375 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for f365b65dae95a0516608c8eeb7b680b7: 2024-12-13T00:56:29,378 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:56:29,378 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734051389378"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051389378"}]},"ts":"1734051389378"} 2024-12-13T00:56:29,386 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-13T00:56:29,388 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:56:29,388 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051389388"}]},"ts":"1734051389388"} 2024-12-13T00:56:29,392 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-13T00:56:29,397 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:56:29,400 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:56:29,400 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:56:29,400 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:56:29,400 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:56:29,401 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:56:29,401 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:56:29,401 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:56:29,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=f365b65dae95a0516608c8eeb7b680b7, ASSIGN}] 2024-12-13T00:56:29,406 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=f365b65dae95a0516608c8eeb7b680b7, ASSIGN 2024-12-13T00:56:29,409 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=f365b65dae95a0516608c8eeb7b680b7, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:56:29,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741841_1017 (size=592039) 2024-12-13T00:56:29,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741841_1017 (size=592039) 2024-12-13T00:56:29,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741841_1017 (size=592039) 2024-12-13T00:56:29,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T00:56:29,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T00:56:29,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741842_1018 (size=1663647) 2024-12-13T00:56:29,559 INFO [9812beb1e4cd:38293 {}] 
balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-13T00:56:29,560 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f365b65dae95a0516608c8eeb7b680b7, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:29,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure f365b65dae95a0516608c8eeb7b680b7, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:56:29,624 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T00:56:29,740 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:29,796 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => f365b65dae95a0516608c8eeb7b680b7, NAME => 'hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7.', STARTKEY => '', ENDKEY => ''} 2024-12-13T00:56:29,797 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. service=AccessControlService 2024-12-13T00:56:29,797 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:56:29,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:29,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,816 INFO [StoreOpener-f365b65dae95a0516608c8eeb7b680b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,824 INFO [StoreOpener-f365b65dae95a0516608c8eeb7b680b7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f365b65dae95a0516608c8eeb7b680b7 columnFamilyName l 2024-12-13T00:56:29,825 DEBUG [StoreOpener-f365b65dae95a0516608c8eeb7b680b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:29,829 INFO [StoreOpener-f365b65dae95a0516608c8eeb7b680b7-1 {}] regionserver.HStore(327): Store=f365b65dae95a0516608c8eeb7b680b7/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:29,831 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,832 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,837 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for f365b65dae95a0516608c8eeb7b680b7 2024-12-13T00:56:29,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:29,848 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened f365b65dae95a0516608c8eeb7b680b7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72300886, jitterRate=0.0773671567440033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:56:29,851 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for f365b65dae95a0516608c8eeb7b680b7: 2024-12-13T00:56:29,854 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7., pid=11, masterSystemTime=1734051389740 2024-12-13T00:56:29,858 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T00:56:29,858 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 
2024-12-13T00:56:29,859 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=f365b65dae95a0516608c8eeb7b680b7, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:29,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T00:56:29,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure f365b65dae95a0516608c8eeb7b680b7, server=9812beb1e4cd,40687,1734051385054 in 298 msec 2024-12-13T00:56:29,874 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:56:29,875 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051389874"}]},"ts":"1734051389874"} 2024-12-13T00:56:29,878 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-13T00:56:29,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T00:56:29,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=f365b65dae95a0516608c8eeb7b680b7, ASSIGN in 466 msec 2024-12-13T00:56:29,886 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:56:29,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 889 msec 2024-12-13T00:56:30,125 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T00:56:30,125 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-13T00:56:30,146 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-13T00:56:30,148 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-13T00:56:30,148 INFO [master/9812beb1e4cd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9812beb1e4cd,38293,1734051383822-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-13T00:56:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741837_1013 (size=358) 2024-12-13T00:56:31,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741838_1014 (size=42) 2024-12-13T00:56:31,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741828_1004 (size=1189) 2024-12-13T00:56:31,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741832_1008 (size=32) 2024-12-13T00:56:31,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:31,453 WARN [Thread-388 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:31,827 INFO [Thread-388 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:31,831 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-13T00:56:31,833 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:31,856 INFO [Thread-388 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:31,856 INFO [Thread-388 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:31,857 INFO [Thread-388 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T00:56:31,860 INFO [Thread-388 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65067abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:31,861 INFO [Thread-388 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bc4bd65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:31,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:31,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:31,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T00:56:31,879 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:31,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@275baefc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:31,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca29929{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:32,067 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-13T00:56:32,067 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-13T00:56:32,068 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T00:56:32,070 INFO [Thread-388 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T00:56:32,176 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:32,646 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:33,112 INFO [Thread-388 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:33,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44c86ad0{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-40465-hadoop-yarn-common-3_4_1_jar-_-any-2701069860599976943/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-13T00:56:33,138 INFO [Thread-388 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f3a232c{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-39111-hadoop-yarn-common-3_4_1_jar-_-any-16430272839465879578/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-13T00:56:33,140 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c37b269{HTTP/1.1, (http/1.1)}{localhost:40465} 2024-12-13T00:56:33,140 INFO [Time-limited test {}] server.Server(415): Started @17473ms 2024-12-13T00:56:33,144 INFO [Thread-388 {}] 
server.AbstractConnector(333): Started ServerConnector@57670a5{HTTP/1.1, (http/1.1)}{localhost:39111} 2024-12-13T00:56:33,144 INFO [Thread-388 {}] server.Server(415): Started @17478ms 2024-12-13T00:56:33,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741843_1019 (size=5) 2024-12-13T00:56:33,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741843_1019 (size=5) 2024-12-13T00:56:33,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741843_1019 (size=5) 2024-12-13T00:56:33,383 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:56:33,465 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-13T00:56:33,468 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-13T00:56:33,470 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T00:56:34,402 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-13T00:56:34,408 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:34,469 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-13T00:56:34,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:34,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:56:34,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-13T00:56:34,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-13T00:56:34,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-13T00:56:34,534 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:56:34,534 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-13T00:56:34,534 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T00:56:34,534 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-13T00:56:34,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T00:56:34,536 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-13T00:56:34,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-13T00:56:34,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-13T00:56:34,539 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:56:34,539 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-13T00:56:34,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T00:56:34,541 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-13T00:56:34,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T00:56:34,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-13T00:56:34,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:34,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:34,554 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T00:56:34,572 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:34,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@145f37c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:34,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b778ac2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:34,680 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-13T00:56:34,680 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T00:56:34,680 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-13T00:56:34,680 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T00:56:34,693 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:34,717 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:34,897 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:34,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6920e688{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-41121-hadoop-yarn-common-3_4_1_jar-_-any-2422345233692744793/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T00:56:34,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@188b6f48{HTTP/1.1, (http/1.1)}{localhost:41121} 2024-12-13T00:56:34,913 INFO [Time-limited test {}] server.Server(415): Started @19247ms 2024-12-13T00:56:35,142 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-13T00:56:35,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:35,175 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-13T00:56:35,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T00:56:35,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T00:56:35,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T00:56:35,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T00:56:35,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T00:56:35,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@370d6475{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,AVAILABLE} 2024-12-13T00:56:35,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34c8e5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-13T00:56:35,286 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-13T00:56:35,286 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-13T00:56:35,286 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-13T00:56:35,287 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-13T00:56:35,296 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:35,302 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:35,422 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-13T00:56:35,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@592fdfd3{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/java.io.tmpdir/jetty-localhost-35191-hadoop-yarn-common-3_4_1_jar-_-any-6397200953909981388/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T00:56:35,429 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@779c3c65{HTTP/1.1, (http/1.1)}{localhost:35191} 2024-12-13T00:56:35,430 INFO [Time-limited test {}] server.Server(415): Started @19763ms 2024-12-13T00:56:35,481 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-13T00:56:35,483 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:56:35,517 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=719, OpenFileDescriptor=751, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=6596 2024-12-13T00:56:35,518 WARN [Time-limited test {}] 
hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-13T00:56:35,532 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T00:56:35,537 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T00:56:35,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:56:35,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:35,552 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:56:35,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-13T00:56:35,553 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:35,555 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:56:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T00:56:35,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741844_1020 (size=406) 2024-12-13T00:56:35,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741844_1020 (size=406) 2024-12-13T00:56:35,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741844_1020 (size=406) 2024-12-13T00:56:35,585 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 38b1a09d42abeda4002161e823a82c0d, NAME => 'testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:35,586 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 8774c2fcd6c1e98abff96f4c986ac930, NAME => 'testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741846_1022 (size=67) 2024-12-13T00:56:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741846_1022 (size=67) 2024-12-13T00:56:35,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741846_1022 (size=67) 2024-12-13T00:56:35,605 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:35,605 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 8774c2fcd6c1e98abff96f4c986ac930, disabling compactions & flushes 2024-12-13T00:56:35,605 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:35,605 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:35,606 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. after waiting 0 ms 2024-12-13T00:56:35,606 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:35,606 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 
2024-12-13T00:56:35,606 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 8774c2fcd6c1e98abff96f4c986ac930: 2024-12-13T00:56:35,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741845_1021 (size=67) 2024-12-13T00:56:35,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741845_1021 (size=67) 2024-12-13T00:56:35,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741845_1021 (size=67) 2024-12-13T00:56:35,610 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:35,611 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 38b1a09d42abeda4002161e823a82c0d, disabling compactions & flushes 2024-12-13T00:56:35,611 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:35,611 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:35,611 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. after waiting 0 ms 2024-12-13T00:56:35,611 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:35,611 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 
2024-12-13T00:56:35,611 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 38b1a09d42abeda4002161e823a82c0d: 2024-12-13T00:56:35,613 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:56:35,614 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734051395613"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051395613"}]},"ts":"1734051395613"} 2024-12-13T00:56:35,614 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734051395613"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051395613"}]},"ts":"1734051395613"} 2024-12-13T00:56:35,651 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:56:35,653 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:56:35,653 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051395653"}]},"ts":"1734051395653"} 2024-12-13T00:56:35,656 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-13T00:56:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T00:56:35,661 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:56:35,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:56:35,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:56:35,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:56:35,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:56:35,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:56:35,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:56:35,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:56:35,664 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, ASSIGN}] 2024-12-13T00:56:35,666 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, ASSIGN 2024-12-13T00:56:35,667 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, ASSIGN 2024-12-13T00:56:35,668 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:56:35,668 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:56:35,818 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:56:35,819 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=38b1a09d42abeda4002161e823a82c0d, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:35,819 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=8774c2fcd6c1e98abff96f4c986ac930, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:35,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:56:35,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 38b1a09d42abeda4002161e823a82c0d, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:56:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T00:56:35,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:35,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:35,988 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 
2024-12-13T00:56:35,988 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 38b1a09d42abeda4002161e823a82c0d, NAME => 'testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:56:35,989 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. service=AccessControlService 2024-12-13T00:56:35,989 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:56:35,990 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:35,990 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:35,990 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:35,990 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:35,994 INFO [StoreOpener-38b1a09d42abeda4002161e823a82c0d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:35,997 INFO [StoreOpener-38b1a09d42abeda4002161e823a82c0d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 38b1a09d42abeda4002161e823a82c0d columnFamilyName cf 2024-12-13T00:56:35,998 DEBUG [StoreOpener-38b1a09d42abeda4002161e823a82c0d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:35,999 INFO [StoreOpener-38b1a09d42abeda4002161e823a82c0d-1 {}] regionserver.HStore(327): Store=38b1a09d42abeda4002161e823a82c0d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:36,004 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,005 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,010 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,016 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:36,016 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 8774c2fcd6c1e98abff96f4c986ac930, NAME => 'testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:56:36,017 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. service=AccessControlService 2024-12-13T00:56:36,017 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:56:36,018 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,018 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:56:36,018 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,018 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:36,021 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 38b1a09d42abeda4002161e823a82c0d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60496712, jitterRate=-0.09852874279022217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:56:36,023 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 38b1a09d42abeda4002161e823a82c0d: 2024-12-13T00:56:36,024 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d., pid=16, masterSystemTime=1734051395979 2024-12-13T00:56:36,025 INFO [StoreOpener-8774c2fcd6c1e98abff96f4c986ac930-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:36,028 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 
2024-12-13T00:56:36,029 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=38b1a09d42abeda4002161e823a82c0d, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:36,035 INFO [StoreOpener-8774c2fcd6c1e98abff96f4c986ac930-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8774c2fcd6c1e98abff96f4c986ac930 columnFamilyName cf 2024-12-13T00:56:36,036 DEBUG [StoreOpener-8774c2fcd6c1e98abff96f4c986ac930-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:56:36,037 INFO [StoreOpener-8774c2fcd6c1e98abff96f4c986ac930-1 {}] regionserver.HStore(327): Store=8774c2fcd6c1e98abff96f4c986ac930/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:56:36,038 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,039 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13 2024-12-13T00:56:36,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 38b1a09d42abeda4002161e823a82c0d, server=9812beb1e4cd,40687,1734051385054 in 210 msec 2024-12-13T00:56:36,044 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, ASSIGN in 377 msec 2024-12-13T00:56:36,049 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:56:36,050 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 
8774c2fcd6c1e98abff96f4c986ac930; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59446865, jitterRate=-0.11417268216609955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:56:36,051 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 8774c2fcd6c1e98abff96f4c986ac930: 2024-12-13T00:56:36,052 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930., pid=15, masterSystemTime=1734051395975 2024-12-13T00:56:36,056 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:36,057 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:36,057 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=8774c2fcd6c1e98abff96f4c986ac930, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:36,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-13T00:56:36,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930, server=9812beb1e4cd,45231,1734051384968 in 239 msec 2024-12-13T00:56:36,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=12 2024-12-13T00:56:36,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, ASSIGN in 401 msec 2024-12-13T00:56:36,072 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:56:36,073 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051396072"}]},"ts":"1734051396072"} 2024-12-13T00:56:36,076 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-13T00:56:36,080 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:56:36,085 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-13T00:56:36,102 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-13T00:56:36,132 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:36,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:36,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T00:56:36,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T00:56:36,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T00:56:36,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T00:56:36,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 593 msec 2024-12-13T00:56:36,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-13T00:56:36,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-13T00:56:36,164 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-13T00:56:36,164 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-13T00:56:36,165 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:56:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-13T00:56:36,171 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:56:36,171 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-13T00:56:36,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T00:56:36,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051396183 (current time:1734051396183). 2024-12-13T00:56:36,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:56:36,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-13T00:56:36,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:56:36,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x557c19aa to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48cd331 2024-12-13T00:56:36,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23778a4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:36,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:36,195 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:36,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x557c19aa to 127.0.0.1:49424 2024-12-13T00:56:36,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:56:36,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04bc1f33 to 127.0.0.1:49424 with session 
timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46c2a58e 2024-12-13T00:56:36,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cfbae62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:36,209 DEBUG [hconnection-0x7fa8ad3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:36,210 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04bc1f33 to 127.0.0.1:49424 2024-12-13T00:56:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:56:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-13T00:56:36,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:56:36,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T00:56:36,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-13T00:56:36,237 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:56:36,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T00:56:36,243 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:56:36,260 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:56:36,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added 
to blk_1073741847_1023 (size=167) 2024-12-13T00:56:36,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741847_1023 (size=167) 2024-12-13T00:56:36,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741847_1023 (size=167) 2024-12-13T00:56:36,276 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:56:36,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930}] 2024-12-13T00:56:36,284 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,284 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T00:56:36,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:36,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:36,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-13T00:56:36,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-13T00:56:36,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:36,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 
2024-12-13T00:56:36,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 8774c2fcd6c1e98abff96f4c986ac930: 2024-12-13T00:56:36,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 38b1a09d42abeda4002161e823a82c0d: 2024-12-13T00:56:36,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. for emptySnaptb0-testExportWithTargetName completed. 2024-12-13T00:56:36,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. for emptySnaptb0-testExportWithTargetName completed. 2024-12-13T00:56:36,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-13T00:56:36,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-13T00:56:36,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:56:36,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:56:36,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:56:36,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:56:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741849_1025 (size=70) 2024-12-13T00:56:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741849_1025 (size=70) 2024-12-13T00:56:36,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741849_1025 (size=70) 2024-12-13T00:56:36,500 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 
2024-12-13T00:56:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-13T00:56:36,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741848_1024 (size=70) 2024-12-13T00:56:36,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741848_1024 (size=70) 2024-12-13T00:56:36,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741848_1024 (size=70) 2024-12-13T00:56:36,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-13T00:56:36,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,507 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:36,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 in 230 msec 2024-12-13T00:56:36,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:36,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-13T00:56:36,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-13T00:56:36,514 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,514 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:36,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-13T00:56:36,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d in 237 msec 2024-12-13T00:56:36,519 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:56:36,521 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:56:36,524 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:56:36,524 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-13T00:56:36,527 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-13T00:56:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T00:56:36,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741850_1026 (size=549) 2024-12-13T00:56:36,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741850_1026 (size=549) 2024-12-13T00:56:36,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741850_1026 (size=549) 2024-12-13T00:56:36,579 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:56:36,600 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:56:36,601 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-13T00:56:36,605 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:56:36,606 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-13T00:56:36,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 374 msec 
2024-12-13T00:56:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-13T00:56:36,846 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-13T00:56:36,890 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:36,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:56:36,893 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:36,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:56:36,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-13T00:56:36,909 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:36,910 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:56:36,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T00:56:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051396958 (current time:1734051396958). 
2024-12-13T00:56:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:56:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-13T00:56:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:56:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x48516de2 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10df1b14 2024-12-13T00:56:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@702b1d05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:36,985 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x48516de2 to 127.0.0.1:49424 2024-12-13T00:56:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:56:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12b89708 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ad739f0 2024-12-13T00:56:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52bcd09a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:56:37,009 DEBUG [hconnection-0x2d734c54-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:56:37,012 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57178, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:56:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12b89708 to 127.0.0.1:49424 2024-12-13T00:56:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:56:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 
2024-12-13T00:56:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:56:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-13T00:56:37,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-13T00:56:37,029 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:56:37,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T00:56:37,031 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:56:37,036 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:56:37,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741851_1027 (size=162) 2024-12-13T00:56:37,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741851_1027 (size=162) 2024-12-13T00:56:37,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741851_1027 (size=162) 2024-12-13T00:56:37,052 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:56:37,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930}] 2024-12-13T00:56:37,055 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:37,055 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 
38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T00:56:37,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:37,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-13T00:56:37,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:37,212 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:37,212 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 8774c2fcd6c1e98abff96f4c986ac930 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-13T00:56:37,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-13T00:56:37,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:37,216 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 38b1a09d42abeda4002161e823a82c0d 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-13T00:56:37,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/.tmp/cf/7f818bc57cff44fb80e20ba0988cec15 is 71, key is 03806ee7a5dcbf000f7bb10938879282/cf:q/1734051396890/Put/seqid=0 2024-12-13T00:56:37,313 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/.tmp/cf/8d15a5397e2c455ba47e9a8eca7d328f is 71, key is 1438be00aa91de806c896b266f952890/cf:q/1734051396896/Put/seqid=0 2024-12-13T00:56:37,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T00:56:37,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741853_1029 (size=5422) 2024-12-13T00:56:37,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741853_1029 (size=5422) 2024-12-13T00:56:37,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741853_1029 (size=5422) 2024-12-13T00:56:37,365 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/.tmp/cf/7f818bc57cff44fb80e20ba0988cec15 2024-12-13T00:56:37,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741852_1028 (size=8188) 2024-12-13T00:56:37,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741852_1028 (size=8188) 2024-12-13T00:56:37,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741852_1028 (size=8188) 2024-12-13T00:56:37,401 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/.tmp/cf/8d15a5397e2c455ba47e9a8eca7d328f 2024-12-13T00:56:37,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/.tmp/cf/7f818bc57cff44fb80e20ba0988cec15 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15 2024-12-13T00:56:37,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/.tmp/cf/8d15a5397e2c455ba47e9a8eca7d328f as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f 2024-12-13T00:56:37,460 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f, entries=45, sequenceid=6, filesize=8.0 K 2024-12-13T00:56:37,460 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15, entries=5, sequenceid=6, filesize=5.3 K 2024-12-13T00:56:37,464 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 38b1a09d42abeda4002161e823a82c0d in 247ms, sequenceid=6, compaction requested=false 2024-12-13T00:56:37,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-13T00:56:37,464 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 8774c2fcd6c1e98abff96f4c986ac930 in 251ms, sequenceid=6, compaction requested=false 2024-12-13T00:56:37,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 8774c2fcd6c1e98abff96f4c986ac930: 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 38b1a09d42abeda4002161e823a82c0d: 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. for snaptb0-testExportWithTargetName completed. 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. for snaptb0-testExportWithTargetName completed. 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15] hfiles 2024-12-13T00:56:37,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15 for snapshot=snaptb0-testExportWithTargetName 2024-12-13T00:56:37,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-13T00:56:37,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:56:37,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f] hfiles 2024-12-13T00:56:37,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f for snapshot=snaptb0-testExportWithTargetName 2024-12-13T00:56:37,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741854_1030 (size=109) 2024-12-13T00:56:37,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741854_1030 (size=109) 2024-12-13T00:56:37,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741854_1030 (size=109) 2024-12-13T00:56:37,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 
2024-12-13T00:56:37,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-13T00:56:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-13T00:56:37,498 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:37,498 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:37,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 38b1a09d42abeda4002161e823a82c0d in 449 msec 2024-12-13T00:56:37,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741855_1031 (size=109) 2024-12-13T00:56:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741855_1031 (size=109) 2024-12-13T00:56:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741855_1031 (size=109) 2024-12-13T00:56:37,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:37,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-13T00:56:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-13T00:56:37,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:37,515 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:37,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-13T00:56:37,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930 in 465 msec 2024-12-13T00:56:37,521 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:56:37,522 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:56:37,523 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:56:37,523 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-13T00:56:37,525 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-13T00:56:37,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741856_1032 (size=627) 2024-12-13T00:56:37,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741856_1032 (size=627) 2024-12-13T00:56:37,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741856_1032 (size=627) 2024-12-13T00:56:37,575 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:56:37,592 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:56:37,593 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-13T00:56:37,596 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:56:37,596 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-13T00:56:37,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 572 msec 2024-12-13T00:56:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-13T00:56:37,639 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-13T00:56:37,639 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639 2024-12-13T00:56:37,639 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:37,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:56:37,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-13T00:56:37,718 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:56:37,736 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-13T00:56:37,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741857_1033 (size=162) 2024-12-13T00:56:37,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741857_1033 (size=162) 2024-12-13T00:56:37,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741857_1033 (size=162) 2024-12-13T00:56:37,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741858_1034 (size=627) 2024-12-13T00:56:37,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741858_1034 (size=627) 2024-12-13T00:56:37,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741858_1034 (size=627) 2024-12-13T00:56:37,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741859_1035 (size=154) 2024-12-13T00:56:37,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741859_1035 (size=154) 
2024-12-13T00:56:37,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741859_1035 (size=154) 2024-12-13T00:56:37,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:37,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:37,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:37,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-18010376447764538566.jar 2024-12-13T00:56:39,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-17309578149158264579.jar 2024-12-13T00:56:39,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:56:39,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:56:39,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:56:39,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:56:39,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:56:39,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:56:39,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:56:39,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:56:39,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:56:39,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:56:39,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:56:39,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:56:39,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:56:39,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:56:39,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:56:39,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:56:39,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:56:39,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:56:39,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:56:39,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:56:39,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741860_1036 (size=127628) 2024-12-13T00:56:39,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741860_1036 (size=127628) 2024-12-13T00:56:39,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741860_1036 (size=127628) 2024-12-13T00:56:39,526 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T00:56:39,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T00:56:39,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741861_1037 (size=2172137) 2024-12-13T00:56:39,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741862_1038 (size=213228) 2024-12-13T00:56:39,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741862_1038 (size=213228) 2024-12-13T00:56:39,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741862_1038 (size=213228) 2024-12-13T00:56:39,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741863_1039 (size=6350924) 2024-12-13T00:56:39,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741863_1039 (size=6350924) 2024-12-13T00:56:39,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741863_1039 (size=6350924) 2024-12-13T00:56:39,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741864_1040 (size=1877034) 2024-12-13T00:56:39,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741864_1040 (size=1877034) 2024-12-13T00:56:39,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741864_1040 (size=1877034) 2024-12-13T00:56:39,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741865_1041 (size=451756) 2024-12-13T00:56:39,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741865_1041 (size=451756) 2024-12-13T00:56:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741865_1041 (size=451756) 2024-12-13T00:56:39,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741866_1042 (size=533455) 2024-12-13T00:56:39,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741866_1042 (size=533455) 2024-12-13T00:56:39,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741866_1042 (size=533455) 2024-12-13T00:56:39,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741867_1043 (size=7280644) 2024-12-13T00:56:39,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741867_1043 (size=7280644) 
2024-12-13T00:56:39,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741867_1043 (size=7280644) 2024-12-13T00:56:39,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741868_1044 (size=4188619) 2024-12-13T00:56:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741868_1044 (size=4188619) 2024-12-13T00:56:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741868_1044 (size=4188619) 2024-12-13T00:56:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741869_1045 (size=20406) 2024-12-13T00:56:39,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741869_1045 (size=20406) 2024-12-13T00:56:39,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741869_1045 (size=20406) 2024-12-13T00:56:39,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741870_1046 (size=75495) 2024-12-13T00:56:39,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741870_1046 (size=75495) 2024-12-13T00:56:40,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741870_1046 (size=75495) 2024-12-13T00:56:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741871_1047 (size=45609) 2024-12-13T00:56:40,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741871_1047 (size=45609) 2024-12-13T00:56:40,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741871_1047 (size=45609) 2024-12-13T00:56:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741872_1048 (size=110084) 2024-12-13T00:56:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741872_1048 (size=110084) 2024-12-13T00:56:40,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741872_1048 (size=110084) 2024-12-13T00:56:40,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T00:56:40,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T00:56:40,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741873_1049 (size=1323991) 2024-12-13T00:56:40,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741874_1050 
(size=23076) 2024-12-13T00:56:40,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741874_1050 (size=23076) 2024-12-13T00:56:40,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741874_1050 (size=23076) 2024-12-13T00:56:40,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741875_1051 (size=126803) 2024-12-13T00:56:40,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741875_1051 (size=126803) 2024-12-13T00:56:40,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741875_1051 (size=126803) 2024-12-13T00:56:40,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741876_1052 (size=322274) 2024-12-13T00:56:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741876_1052 (size=322274) 2024-12-13T00:56:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741876_1052 (size=322274) 2024-12-13T00:56:40,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741877_1053 (size=1832290) 2024-12-13T00:56:40,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741877_1053 (size=1832290) 2024-12-13T00:56:40,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741877_1053 (size=1832290) 2024-12-13T00:56:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741878_1054 (size=30081) 2024-12-13T00:56:40,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741878_1054 (size=30081) 2024-12-13T00:56:40,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741878_1054 (size=30081) 2024-12-13T00:56:40,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741879_1055 (size=53616) 2024-12-13T00:56:40,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741879_1055 (size=53616) 2024-12-13T00:56:40,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741879_1055 (size=53616) 2024-12-13T00:56:40,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741880_1056 (size=29229) 2024-12-13T00:56:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741880_1056 (size=29229) 2024-12-13T00:56:40,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741880_1056 
(size=29229) 2024-12-13T00:56:40,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741881_1057 (size=169089) 2024-12-13T00:56:40,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741881_1057 (size=169089) 2024-12-13T00:56:40,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741881_1057 (size=169089) 2024-12-13T00:56:40,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T00:56:40,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T00:56:40,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741882_1058 (size=5175431) 2024-12-13T00:56:40,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741883_1059 (size=136454) 2024-12-13T00:56:40,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741883_1059 (size=136454) 2024-12-13T00:56:40,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741883_1059 (size=136454) 2024-12-13T00:56:40,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741884_1060 (size=907855) 2024-12-13T00:56:40,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741884_1060 (size=907855) 2024-12-13T00:56:40,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741884_1060 (size=907855) 2024-12-13T00:56:41,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T00:56:41,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T00:56:41,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741885_1061 (size=3317408) 2024-12-13T00:56:41,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741886_1062 (size=503880) 2024-12-13T00:56:41,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741886_1062 (size=503880) 2024-12-13T00:56:41,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741886_1062 (size=503880) 2024-12-13T00:56:41,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741887_1063 (size=4695811) 2024-12-13T00:56:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to 
blk_1073741887_1063 (size=4695811) 2024-12-13T00:56:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741887_1063 (size=4695811) 2024-12-13T00:56:41,133 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T00:56:41,156 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-13T00:56:41,169 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:56:41,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741888_1064 (size=342) 2024-12-13T00:56:41,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741888_1064 (size=342) 2024-12-13T00:56:41,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741888_1064 (size=342) 2024-12-13T00:56:41,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741889_1065 (size=15) 2024-12-13T00:56:41,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741889_1065 (size=15) 2024-12-13T00:56:41,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741889_1065 (size=15) 2024-12-13T00:56:41,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:56:41,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741890_1066 (size=304890) 2024-12-13T00:56:41,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741890_1066 (size=304890) 2024-12-13T00:56:41,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741890_1066 (size=304890) 2024-12-13T00:56:41,926 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:56:41,927 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:56:42,136 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0001_000001 (auth:SIMPLE) from 127.0.0.1:48180 2024-12-13T00:56:44,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-13T00:56:44,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-13T00:56:50,906 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0001_000001 (auth:SIMPLE) from 127.0.0.1:41946 2024-12-13T00:56:51,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741891_1067 (size=350564) 2024-12-13T00:56:51,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741891_1067 (size=350564) 2024-12-13T00:56:51,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741891_1067 (size=350564) 2024-12-13T00:56:52,760 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T00:56:53,293 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0001_000001 (auth:SIMPLE) from 127.0.0.1:43722 2024-12-13T00:56:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741892_1068 (size=8188) 2024-12-13T00:56:57,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741892_1068 (size=8188) 2024-12-13T00:56:57,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741892_1068 (size=8188) 2024-12-13T00:56:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741893_1069 (size=5422) 2024-12-13T00:56:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741893_1069 (size=5422) 2024-12-13T00:56:57,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741893_1069 (size=5422) 2024-12-13T00:56:57,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741894_1070 (size=17419) 2024-12-13T00:56:57,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741894_1070 (size=17419) 2024-12-13T00:56:57,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741894_1070 (size=17419) 2024-12-13T00:56:57,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40551 is added to blk_1073741895_1071 (size=464) 2024-12-13T00:56:57,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741895_1071 (size=464) 2024-12-13T00:56:57,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741895_1071 (size=464) 2024-12-13T00:56:57,696 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000002/launch_container.sh] 2024-12-13T00:56:57,696 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000002/container_tokens] 2024-12-13T00:56:57,697 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000002/sysfs] 2024-12-13T00:56:57,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741896_1072 (size=17419) 2024-12-13T00:56:57,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741896_1072 (size=17419) 2024-12-13T00:56:57,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741896_1072 (size=17419) 2024-12-13T00:56:57,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741897_1073 (size=350564) 2024-12-13T00:56:57,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741897_1073 (size=350564) 2024-12-13T00:56:57,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741897_1073 (size=350564) 2024-12-13T00:56:57,886 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0001_000001 (auth:SIMPLE) from 127.0.0.1:43726 2024-12-13T00:56:59,380 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:56:59,383 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), 
service=RegionServerStatusService 2024-12-13T00:56:59,383 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:56:59,385 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T00:56:59,403 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-13T00:56:59,403 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:56:59,404 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:56:59,404 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-13T00:56:59,405 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-13T00:56:59,405 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-13T00:56:59,405 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/testExportWithTargetName 2024-12-13T00:56:59,406 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-13T00:56:59,406 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051397639/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-13T00:56:59,424 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-13T00:56:59,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-13T00:56:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,441 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051419441"}]},"ts":"1734051419441"} 2024-12-13T00:56:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=23 2024-12-13T00:56:59,444 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-13T00:56:59,469 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-13T00:56:59,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-13T00:56:59,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, UNASSIGN}] 2024-12-13T00:56:59,481 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, UNASSIGN 2024-12-13T00:56:59,482 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, UNASSIGN 2024-12-13T00:56:59,483 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=38b1a09d42abeda4002161e823a82c0d, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:59,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=8774c2fcd6c1e98abff96f4c986ac930, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:59,486 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:56:59,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE; CloseRegionProcedure 38b1a09d42abeda4002161e823a82c0d, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:56:59,487 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:56:59,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE; CloseRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:56:59,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-13T00:56:59,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:56:59,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:56:59,653 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:59,654 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:56:59,655 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 38b1a09d42abeda4002161e823a82c0d, disabling compactions & flushes 2024-12-13T00:56:59,655 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:59,655 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:59,655 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. after waiting 0 ms 2024-12-13T00:56:59,655 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:59,660 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:59,660 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:56:59,660 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 8774c2fcd6c1e98abff96f4c986ac930, disabling compactions & flushes 2024-12-13T00:56:59,661 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:59,661 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 2024-12-13T00:56:59,661 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. after waiting 0 ms 2024-12-13T00:56:59,661 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 
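[Editor's note, not part of the captured log] The "Finalize the Snapshot Export" / "Export Completed: testExportWithTargetName" entries above record the ExportSnapshot tool copying snapshot snaptb0-testExportWithTargetName into the export directory under a new target name. A minimal sketch of driving such an export programmatically follows; it is illustrative only, and the option names (--snapshot, --copy-to, --target) are as commonly documented for the tool rather than taken from this log, so they should be checked against the tool's usage output for the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotWithTargetName {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Run the ExportSnapshot tool the same way the command line would.
    // The destination URI below is a placeholder; this test run used an
    // hdfs://localhost:46775/.../export-test/... path.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithTargetName",
        "--target", "testExportWithTargetName",   // assumed option: rename the snapshot on export
        "--copy-to", "hdfs://<namenode>:<port>/<export-dir>"
    });
    System.exit(rc);
  }
}
```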
2024-12-13T00:56:59,682 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:56:59,683 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:56:59,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:56:59,688 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d. 2024-12-13T00:56:59,688 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 38b1a09d42abeda4002161e823a82c0d: 2024-12-13T00:56:59,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:56:59,688 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930. 
2024-12-13T00:56:59,688 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 8774c2fcd6c1e98abff96f4c986ac930: 2024-12-13T00:56:59,693 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:59,694 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=38b1a09d42abeda4002161e823a82c0d, regionState=CLOSED 2024-12-13T00:56:59,696 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:59,698 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=8774c2fcd6c1e98abff96f4c986ac930, regionState=CLOSED 2024-12-13T00:56:59,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=25 2024-12-13T00:56:59,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=25, state=SUCCESS; CloseRegionProcedure 38b1a09d42abeda4002161e823a82c0d, server=9812beb1e4cd,40687,1734051385054 in 215 msec 2024-12-13T00:56:59,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=26 2024-12-13T00:56:59,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=38b1a09d42abeda4002161e823a82c0d, UNASSIGN in 226 msec 2024-12-13T00:56:59,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=26, state=SUCCESS; CloseRegionProcedure 8774c2fcd6c1e98abff96f4c986ac930, server=9812beb1e4cd,45231,1734051384968 in 216 msec 2024-12-13T00:56:59,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=24 2024-12-13T00:56:59,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=8774c2fcd6c1e98abff96f4c986ac930, UNASSIGN in 227 msec 2024-12-13T00:56:59,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-13T00:56:59,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 240 msec 2024-12-13T00:56:59,718 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051419717"}]},"ts":"1734051419717"} 2024-12-13T00:56:59,720 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-13T00:56:59,738 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-13T00:56:59,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 308 msec 2024-12-13T00:56:59,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-13T00:56:59,745 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-13T00:56:59,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-13T00:56:59,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,761 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-13T00:56:59,763 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,768 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-13T00:56:59,775 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:59,776 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:59,783 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/recovered.edits] 2024-12-13T00:56:59,783 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/recovered.edits] 2024-12-13T00:56:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-13T00:56:59,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-13T00:56:59,851 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/cf/8d15a5397e2c455ba47e9a8eca7d328f 2024-12-13T00:56:59,852 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/cf/7f818bc57cff44fb80e20ba0988cec15 2024-12-13T00:56:59,862 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930/recovered.edits/9.seqid 2024-12-13T00:56:59,863 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/8774c2fcd6c1e98abff96f4c986ac930 2024-12-13T00:56:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:59,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:59,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:59,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-13T00:56:59,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-13T00:56:59,870 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T00:56:59,870 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-13T00:56:59,871 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d/recovered.edits/9.seqid 2024-12-13T00:56:59,871 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithTargetName/38b1a09d42abeda4002161e823a82c0d 2024-12-13T00:56:59,872 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-13T00:56:59,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-13T00:56:59,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:56:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-13T00:56:59,876 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-13T00:56:59,892 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-13T00:56:59,898 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-13T00:56:59,901 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,901 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 
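[Editor's note, not part of the captured log] The DisableTableProcedure (pid=23) and DeleteTableProcedure (pid=29) traces above, together with the snapshot deletions recorded just below, correspond to the kind of client-side cleanup a test typically issues after verifying an export. A minimal sketch, assuming the standard public HBase Admin API (these class and method names are the documented client API, not something read out of this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // server side runs a DisableTableProcedure, as in pid=23 above
      admin.deleteTable(table);    // server side runs a DeleteTableProcedure, as in pid=29 above
      // Remove the snapshots taken during the test (see the delete requests below in the log).
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```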
2024-12-13T00:56:59,902 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051419901"}]},"ts":"9223372036854775807"} 2024-12-13T00:56:59,902 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051419901"}]},"ts":"9223372036854775807"} 2024-12-13T00:56:59,907 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:56:59,907 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 38b1a09d42abeda4002161e823a82c0d, NAME => 'testtb-testExportWithTargetName,,1734051395546.38b1a09d42abeda4002161e823a82c0d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8774c2fcd6c1e98abff96f4c986ac930, NAME => 'testtb-testExportWithTargetName,1,1734051395546.8774c2fcd6c1e98abff96f4c986ac930.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:56:59,907 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-13T00:56:59,907 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051419907"}]},"ts":"9223372036854775807"} 2024-12-13T00:56:59,911 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-13T00:56:59,974 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-13T00:56:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-13T00:56:59,978 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 223 msec 2024-12-13T00:57:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-13T00:57:00,179 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-13T00:57:00,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-13T00:57:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-13T00:57:00,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-13T00:57:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-13T00:57:00,256 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=769 (was 719) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:47306 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Thread-1289 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:59440 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 22738) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:50534 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:44961 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46229 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:46229 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_432991721_1 at /127.0.0.1:59412 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44961 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_432991721_1 at /127.0.0.1:50502 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=771 (was 751) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=518 (was 318) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4693 (was 6596) 2024-12-13T00:57:00,256 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=769 is superior to 500 2024-12-13T00:57:00,281 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=769, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=518, ProcessCount=20, AvailableMemoryMB=4691 2024-12-13T00:57:00,282 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=769 is superior to 500 2024-12-13T00:57:00,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:57:00,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:00,287 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:57:00,287 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:00,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-13T00:57:00,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T00:57:00,293 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:57:00,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741898_1074 (size=404) 2024-12-13T00:57:00,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741898_1074 (size=404) 2024-12-13T00:57:00,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741898_1074 (size=404) 2024-12-13T00:57:00,311 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] 
regionserver.HRegion(7106): creating {ENCODED => 30980404399d8ae8b8559ed36da41deb, NAME => 'testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:00,311 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2b5681655899e82ccd476ae23464a76f, NAME => 'testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:00,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741899_1075 (size=65) 2024-12-13T00:57:00,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741899_1075 (size=65) 2024-12-13T00:57:00,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741899_1075 (size=65) 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 30980404399d8ae8b8559ed36da41deb, disabling compactions & flushes 2024-12-13T00:57:00,341 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 
after waiting 0 ms 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,341 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,341 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 30980404399d8ae8b8559ed36da41deb: 2024-12-13T00:57:00,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741900_1076 (size=65) 2024-12-13T00:57:00,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741900_1076 (size=65) 2024-12-13T00:57:00,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741900_1076 (size=65) 2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 2b5681655899e82ccd476ae23464a76f, disabling compactions & flushes 2024-12-13T00:57:00,347 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. after waiting 0 ms 2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:00,347 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 
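For orientation: the records above show the master executing a client create-table request for 'testtb-testExportWithResetTtl' with a single column family 'cf' (VERSIONS => '1', BLOCKSIZE => '65536', REGION_REPLICATION => '1') and initializing two regions split at '1'. Below is a minimal client-side sketch of an equivalent request against the HBase 2.x Admin API; the connection details are assumptions, and only the attributes visible in the log are set explicitly, everything else stays at its default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // Column family 'cf' with the attributes printed in the log above.
      ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBlocksize(65536);
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)
          .setColumnFamily(cf.build());
      // Pre-split at '1' so the table starts with the two regions ('' -> '1') and ('1' -> '') seen above.
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}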
2024-12-13T00:57:00,347 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2b5681655899e82ccd476ae23464a76f: 2024-12-13T00:57:00,350 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:57:00,350 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051420350"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051420350"}]},"ts":"1734051420350"} 2024-12-13T00:57:00,351 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051420350"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051420350"}]},"ts":"1734051420350"} 2024-12-13T00:57:00,356 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:57:00,357 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:57:00,358 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051420357"}]},"ts":"1734051420357"} 2024-12-13T00:57:00,360 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-13T00:57:00,366 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:57:00,368 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:57:00,368 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:57:00,368 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:57:00,368 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:57:00,368 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:57:00,368 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:57:00,368 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:57:00,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, ASSIGN}] 2024-12-13T00:57:00,371 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=30980404399d8ae8b8559ed36da41deb, ASSIGN 2024-12-13T00:57:00,371 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, ASSIGN 2024-12-13T00:57:00,373 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:57:00,373 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:57:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T00:57:00,524 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:57:00,524 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=30980404399d8ae8b8559ed36da41deb, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:00,524 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=2b5681655899e82ccd476ae23464a76f, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:00,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 30980404399d8ae8b8559ed36da41deb, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:57:00,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=31, state=RUNNABLE; OpenRegionProcedure 2b5681655899e82ccd476ae23464a76f, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:57:00,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T00:57:00,685 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:00,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:00,689 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 
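The assignment work above (TransitRegionStateProcedure followed by OpenRegionProcedure) runs asynchronously after the create-table RPC is accepted, which is why the handler keeps polling "Checking to see if procedure is done pid=30". A rough sketch of one way a client can block until the regions are actually open, assuming an Admin handle like the one in the previous sketch; production code would add a deadline rather than looping forever.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForAssignmentSketch {
  // Polls until every region of the table is open on some region server.
  public static void waitUntilAvailable(Admin admin, TableName table) throws Exception {
    // Admin.isTableAvailable returns true only once all regions are assigned and open.
    while (!admin.isTableAvailable(table)) {
      Thread.sleep(100);
    }
  }
}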
2024-12-13T00:57:00,690 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 30980404399d8ae8b8559ed36da41deb, NAME => 'testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:57:00,690 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. service=AccessControlService 2024-12-13T00:57:00,690 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:57:00,691 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,691 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:00,691 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,691 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,689 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:00,692 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 2b5681655899e82ccd476ae23464a76f, NAME => 'testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:57:00,693 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. service=AccessControlService 2024-12-13T00:57:00,693 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
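The "Registered coprocessor service ... service=AccessControlService" lines above come from the AccessController coprocessor that this secure test cluster loads on every region. The log does not show how the cluster was configured, but a deployment would typically enable the same behaviour with the standard HBase keys sketched below; treat this as an illustrative assumption, not the test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class AccessControlConfSketch {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and load the AccessController on master, region servers and regions.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}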
2024-12-13T00:57:00,693 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,693 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:00,693 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,693 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,695 INFO [StoreOpener-30980404399d8ae8b8559ed36da41deb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,696 INFO [StoreOpener-2b5681655899e82ccd476ae23464a76f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,698 INFO [StoreOpener-30980404399d8ae8b8559ed36da41deb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 30980404399d8ae8b8559ed36da41deb columnFamilyName cf 2024-12-13T00:57:00,698 DEBUG [StoreOpener-30980404399d8ae8b8559ed36da41deb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:00,699 INFO [StoreOpener-30980404399d8ae8b8559ed36da41deb-1 {}] regionserver.HStore(327): Store=30980404399d8ae8b8559ed36da41deb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:00,700 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,701 INFO [StoreOpener-2b5681655899e82ccd476ae23464a76f-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b5681655899e82ccd476ae23464a76f columnFamilyName cf 2024-12-13T00:57:00,701 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,701 DEBUG [StoreOpener-2b5681655899e82ccd476ae23464a76f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:00,702 INFO [StoreOpener-2b5681655899e82ccd476ae23464a76f-1 {}] regionserver.HStore(327): Store=2b5681655899e82ccd476ae23464a76f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:00,703 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,704 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,705 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:00,707 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,709 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:00,709 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 30980404399d8ae8b8559ed36da41deb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64986463, jitterRate=-0.03162623941898346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:00,711 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open 
journal for 30980404399d8ae8b8559ed36da41deb: 2024-12-13T00:57:00,712 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:00,713 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb., pid=33, masterSystemTime=1734051420685 2024-12-13T00:57:00,713 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 2b5681655899e82ccd476ae23464a76f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62531908, jitterRate=-0.0682019591331482}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:00,713 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 2b5681655899e82ccd476ae23464a76f: 2024-12-13T00:57:00,714 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f., pid=34, masterSystemTime=1734051420685 2024-12-13T00:57:00,715 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,716 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:00,717 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=30980404399d8ae8b8559ed36da41deb, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:00,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:00,717 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 
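Once the open handlers above finish their post-open deploy tasks, each region's location (9812beb1e4cd,45231,... and 9812beb1e4cd,40687,... in this run) is published in hbase:meta. A short sketch of how a client can read those locations back through the RegionLocator API, assuming an open Connection and the same table name:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionLocationsSketch {
  public static void printLocations(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // Each entry pairs a RegionInfo (start/end key, encoded name) with the server hosting it.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}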
2024-12-13T00:57:00,718 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=2b5681655899e82ccd476ae23464a76f, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:00,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-13T00:57:00,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 30980404399d8ae8b8559ed36da41deb, server=9812beb1e4cd,45231,1734051384968 in 191 msec 2024-12-13T00:57:00,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=31 2024-12-13T00:57:00,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=31, state=SUCCESS; OpenRegionProcedure 2b5681655899e82ccd476ae23464a76f, server=9812beb1e4cd,40687,1734051385054 in 191 msec 2024-12-13T00:57:00,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, ASSIGN in 356 msec 2024-12-13T00:57:00,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-13T00:57:00,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, ASSIGN in 362 msec 2024-12-13T00:57:00,736 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:57:00,736 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051420736"}]},"ts":"1734051420736"} 2024-12-13T00:57:00,738 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-13T00:57:00,741 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:57:00,742 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-13T00:57:00,744 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T00:57:00,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:00,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:00,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:00,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:00,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:00,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:00,750 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:00,750 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:00,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 465 msec 2024-12-13T00:57:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-13T00:57:00,894 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-13T00:57:00,894 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-13T00:57:00,894 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:00,898 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-13T00:57:00,899 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:00,899 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-13T00:57:00,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T00:57:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051420919 (current time:1734051420919). 
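The snapshot request logged above ({ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) is what the Admin snapshot API sends to the master. A minimal sketch of the equivalent client call; as with the other sketches, the Admin handle is assumed to already exist, and the call blocks until the master reports the snapshot as complete.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class SnapshotSketch {
  public static void takeSnapshot(Admin admin) throws Exception {
    // A FLUSH snapshot of an enabled table, matching the type shown in the request above.
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportWithResetTtl",
        TableName.valueOf("testtb-testExportWithResetTtl"),
        SnapshotType.FLUSH));
  }
}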
2024-12-13T00:57:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T00:57:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:00,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x657012fb to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ebfef30 2024-12-13T00:57:00,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22ec8420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:00,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:00,938 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x657012fb to 127.0.0.1:49424 2024-12-13T00:57:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f0021ba to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ef79b05 2024-12-13T00:57:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68fcc77f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:00,947 DEBUG [hconnection-0x28a8df0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:00,948 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f0021ba to 127.0.0.1:49424 2024-12-13T00:57:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:00,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 
2024-12-13T00:57:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:00,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T00:57:00,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-13T00:57:00,956 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:00,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T00:57:00,957 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:00,961 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741901_1077 (size=161) 2024-12-13T00:57:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741901_1077 (size=161) 2024-12-13T00:57:00,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741901_1077 (size=161) 2024-12-13T00:57:00,972 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:00,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb}] 2024-12-13T00:57:00,973 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:00,973 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 
30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T00:57:01,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:01,125 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:01,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-13T00:57:01,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 30980404399d8ae8b8559ed36da41deb: 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 2b5681655899e82ccd476ae23464a76f: 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-13T00:57:01,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:01,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:01,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741903_1079 (size=68) 2024-12-13T00:57:01,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741903_1079 (size=68) 2024-12-13T00:57:01,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741902_1078 (size=68) 2024-12-13T00:57:01,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741903_1079 (size=68) 2024-12-13T00:57:01,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741902_1078 (size=68) 2024-12-13T00:57:01,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741902_1078 (size=68) 2024-12-13T00:57:01,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:01,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-13T00:57:01,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 
2024-12-13T00:57:01,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-13T00:57:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-13T00:57:01,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-13T00:57:01,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,144 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:01,144 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f in 173 msec 2024-12-13T00:57:01,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=35 2024-12-13T00:57:01,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb in 173 msec 2024-12-13T00:57:01,149 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:01,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:01,151 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:01,151 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:01,152 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:01,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741904_1080 (size=543) 2024-12-13T00:57:01,174 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741904_1080 (size=543) 2024-12-13T00:57:01,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741904_1080 (size=543) 2024-12-13T00:57:01,181 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:01,189 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:01,190 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:01,193 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:01,193 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-13T00:57:01,196 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 239 msec 2024-12-13T00:57:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-13T00:57:01,259 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-13T00:57:01,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:01,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:01,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-13T00:57:01,281 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 
2024-12-13T00:57:01,281 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:01,297 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T00:57:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051421297 (current time:1734051421297). 2024-12-13T00:57:01,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:01,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T00:57:01,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fef4d1b to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7128bc96 2024-12-13T00:57:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7517266b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:01,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:01,306 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fef4d1b to 127.0.0.1:49424 2024-12-13T00:57:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d873e21 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503a49e2 2024-12-13T00:57:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5683531a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:01,320 DEBUG [hconnection-0x54747571-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:01,322 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60272, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d873e21 to 127.0.0.1:49424 2024-12-13T00:57:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T00:57:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-13T00:57:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-13T00:57:01,329 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T00:57:01,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:01,334 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741905_1081 (size=156) 2024-12-13T00:57:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741905_1081 (size=156) 2024-12-13T00:57:01,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741905_1081 (size=156) 2024-12-13T00:57:01,348 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:01,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb}] 2024-12-13T00:57:01,350 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,350 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:01,376 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:57:01,377 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T00:57:01,377 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T00:57:01,378 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T00:57:01,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T00:57:01,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:01,501 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:01,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-13T00:57:01,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-13T00:57:01,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:01,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 
2024-12-13T00:57:01,503 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 2b5681655899e82ccd476ae23464a76f 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T00:57:01,503 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 30980404399d8ae8b8559ed36da41deb 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T00:57:01,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/.tmp/cf/f3a4129e7981480b9ae55058dba51c65 is 71, key is 035a3d3b7e993629b5258bdaeaf452e4/cf:q/1734051421270/Put/seqid=0 2024-12-13T00:57:01,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/.tmp/cf/01cd4e65725d425cb7c2a3efffb5fef0 is 71, key is 134d37aa5928c93e46aa50a5dcd024de/cf:q/1734051421273/Put/seqid=0 2024-12-13T00:57:01,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741906_1082 (size=5354) 2024-12-13T00:57:01,544 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/.tmp/cf/f3a4129e7981480b9ae55058dba51c65 2024-12-13T00:57:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741906_1082 (size=5354) 2024-12-13T00:57:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741906_1082 (size=5354) 2024-12-13T00:57:01,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/.tmp/cf/f3a4129e7981480b9ae55058dba51c65 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65 2024-12-13T00:57:01,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65, entries=4, sequenceid=6, filesize=5.2 K 2024-12-13T00:57:01,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, 
heapSize ~816 B/816, currentSize=0 B/0 for 2b5681655899e82ccd476ae23464a76f in 60ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:01,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-13T00:57:01,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 2b5681655899e82ccd476ae23464a76f: 2024-12-13T00:57:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. for snaptb0-testExportWithResetTtl completed. 2024-12-13T00:57:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65] hfiles 2024-12-13T00:57:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65 for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741907_1083 (size=8256) 2024-12-13T00:57:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741907_1083 (size=8256) 2024-12-13T00:57:01,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741907_1083 (size=8256) 2024-12-13T00:57:01,572 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/.tmp/cf/01cd4e65725d425cb7c2a3efffb5fef0 2024-12-13T00:57:01,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/.tmp/cf/01cd4e65725d425cb7c2a3efffb5fef0 as 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0 2024-12-13T00:57:01,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0, entries=46, sequenceid=6, filesize=8.1 K 2024-12-13T00:57:01,588 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 30980404399d8ae8b8559ed36da41deb in 85ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:01,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 30980404399d8ae8b8559ed36da41deb: 2024-12-13T00:57:01,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. for snaptb0-testExportWithResetTtl completed. 2024-12-13T00:57:01,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:01,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0] hfiles 2024-12-13T00:57:01,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0 for snapshot=snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741908_1084 (size=107) 2024-12-13T00:57:01,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741908_1084 (size=107) 2024-12-13T00:57:01,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741908_1084 (size=107) 2024-12-13T00:57:01,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:01,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-13T00:57:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-13T00:57:01,603 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:01,603 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:01,605 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 2b5681655899e82ccd476ae23464a76f in 256 msec 2024-12-13T00:57:01,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741909_1085 (size=107) 2024-12-13T00:57:01,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741909_1085 (size=107) 2024-12-13T00:57:01,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741909_1085 (size=107) 2024-12-13T00:57:01,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 
2024-12-13T00:57:01,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-13T00:57:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-13T00:57:01,614 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,614 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:01,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-13T00:57:01,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 30980404399d8ae8b8559ed36da41deb in 267 msec 2024-12-13T00:57:01,618 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:01,619 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:01,619 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:01,620 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,621 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T00:57:01,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741910_1086 (size=621) 2024-12-13T00:57:01,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741910_1086 (size=621) 2024-12-13T00:57:01,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741910_1086 (size=621) 2024-12-13T00:57:01,650 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:01,658 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:01,658 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-13T00:57:01,660 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:01,660 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-13T00:57:01,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 333 msec 2024-12-13T00:57:01,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-13T00:57:01,935 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-13T00:57:01,937 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:57:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:01,940 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:57:01,940 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:01,940 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-13T00:57:01,941 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T00:57:01,941 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:57:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741911_1087 (size=397) 2024-12-13T00:57:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741911_1087 (size=397) 2024-12-13T00:57:01,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741911_1087 (size=397) 2024-12-13T00:57:01,970 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 8e8891b618f49dd148e2915558bb2c48, NAME => 'testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:01,970 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cef781983bab36286c8a20673ce80a17, NAME => 'testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741912_1088 (size=58) 2024-12-13T00:57:01,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741912_1088 (size=58) 2024-12-13T00:57:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741912_1088 (size=58) 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 
8e8891b618f49dd148e2915558bb2c48, disabling compactions & flushes 2024-12-13T00:57:01,996 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. after waiting 0 ms 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:01,996 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:01,996 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 8e8891b618f49dd148e2915558bb2c48: 2024-12-13T00:57:02,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741913_1089 (size=58) 2024-12-13T00:57:02,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741913_1089 (size=58) 2024-12-13T00:57:02,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741913_1089 (size=58) 2024-12-13T00:57:02,005 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:02,005 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing cef781983bab36286c8a20673ce80a17, disabling compactions & flushes 2024-12-13T00:57:02,005 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,005 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,005 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. after waiting 0 ms 2024-12-13T00:57:02,005 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,006 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 
2024-12-13T00:57:02,006 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for cef781983bab36286c8a20673ce80a17: 2024-12-13T00:57:02,007 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:57:02,007 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734051422007"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051422007"}]},"ts":"1734051422007"} 2024-12-13T00:57:02,008 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734051422007"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051422007"}]},"ts":"1734051422007"} 2024-12-13T00:57:02,011 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:57:02,012 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:57:02,012 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051422012"}]},"ts":"1734051422012"} 2024-12-13T00:57:02,014 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-13T00:57:02,018 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:57:02,019 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:57:02,019 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:57:02,019 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:57:02,019 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:57:02,019 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:57:02,019 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:57:02,019 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:57:02,020 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, ASSIGN}] 2024-12-13T00:57:02,021 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, ASSIGN 2024-12-13T00:57:02,021 INFO 
[PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, ASSIGN 2024-12-13T00:57:02,022 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:57:02,022 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:57:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T00:57:02,173 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:57:02,173 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=cef781983bab36286c8a20673ce80a17, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:02,174 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=8e8891b618f49dd148e2915558bb2c48, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:02,176 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure cef781983bab36286c8a20673ce80a17, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:57:02,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=43, state=RUNNABLE; OpenRegionProcedure 8e8891b618f49dd148e2915558bb2c48, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:57:02,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T00:57:02,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:02,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:02,332 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:57:02,335 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 
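The CreateTableProcedure and region-assignment entries above show testExportWithResetTtl being created with a single column family cf and two regions split at the row key '1' (STARTKEY ''..'1' and '1'..''). As a point of reference, a minimal sketch of how such a pre-split table could be created through the public HBase Java client; the Configuration and connection setup here are assumptions for illustration and are not taken from the test code itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml / ZooKeeper quorum from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // One family "cf", matching the StoreOpener entries in the log.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // A single split key "1" yields the two regions seen above:
      // [ '' , '1' ) and [ '1' , '' ).
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The call blocks until the master's CreateTableProcedure (pid=41 in this run) finishes, which is what the repeated "Checking to see if procedure is done pid=41" entries below correspond to on the RPC side.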
2024-12-13T00:57:02,335 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => cef781983bab36286c8a20673ce80a17, NAME => 'testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:57:02,336 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. service=AccessControlService 2024-12-13T00:57:02,336 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:57:02,336 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:57:02,336 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,336 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:02,336 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,337 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,344 INFO [StoreOpener-cef781983bab36286c8a20673ce80a17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,347 INFO [StoreOpener-cef781983bab36286c8a20673ce80a17-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cef781983bab36286c8a20673ce80a17 columnFamilyName cf 2024-12-13T00:57:02,347 DEBUG [StoreOpener-cef781983bab36286c8a20673ce80a17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:02,347 INFO [StoreOpener-cef781983bab36286c8a20673ce80a17-1 {}] 
regionserver.HStore(327): Store=cef781983bab36286c8a20673ce80a17/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:02,348 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,349 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,354 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,354 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:02,354 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 8e8891b618f49dd148e2915558bb2c48, NAME => 'testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:57:02,355 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. service=AccessControlService 2024-12-13T00:57:02,355 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:57:02,355 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,355 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:02,355 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,355 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,357 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:02,357 INFO [StoreOpener-8e8891b618f49dd148e2915558bb2c48-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,358 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened cef781983bab36286c8a20673ce80a17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72440332, jitterRate=0.07944506406784058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:02,359 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for cef781983bab36286c8a20673ce80a17: 2024-12-13T00:57:02,360 INFO [StoreOpener-8e8891b618f49dd148e2915558bb2c48-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e8891b618f49dd148e2915558bb2c48 columnFamilyName cf 2024-12-13T00:57:02,360 DEBUG [StoreOpener-8e8891b618f49dd148e2915558bb2c48-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:02,360 INFO [StoreOpener-8e8891b618f49dd148e2915558bb2c48-1 {}] regionserver.HStore(327): Store=8e8891b618f49dd148e2915558bb2c48/cf, 
memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:02,361 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,361 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17., pid=44, masterSystemTime=1734051422329 2024-12-13T00:57:02,362 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,373 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,374 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,375 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=cef781983bab36286c8a20673ce80a17, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:02,377 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,381 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:02,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-13T00:57:02,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure cef781983bab36286c8a20673ce80a17, server=9812beb1e4cd,45231,1734051384968 in 201 msec 2024-12-13T00:57:02,382 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 8e8891b618f49dd148e2915558bb2c48; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63340634, jitterRate=-0.056151002645492554}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:02,382 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 8e8891b618f49dd148e2915558bb2c48: 2024-12-13T00:57:02,383 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for 
testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48., pid=45, masterSystemTime=1734051422331 2024-12-13T00:57:02,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, ASSIGN in 362 msec 2024-12-13T00:57:02,386 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:02,387 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:02,388 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=8e8891b618f49dd148e2915558bb2c48, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:02,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=43 2024-12-13T00:57:02,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=43, state=SUCCESS; OpenRegionProcedure 8e8891b618f49dd148e2915558bb2c48, server=9812beb1e4cd,46075,1734051384875 in 214 msec 2024-12-13T00:57:02,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-13T00:57:02,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, ASSIGN in 377 msec 2024-12-13T00:57:02,400 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:57:02,401 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051422400"}]},"ts":"1734051422400"} 2024-12-13T00:57:02,403 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-13T00:57:02,407 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:57:02,407 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-13T00:57:02,410 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T00:57:02,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:02,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:02,413 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:02,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:02,417 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,417 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,417 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,418 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:02,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 479 msec 2024-12-13T00:57:02,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-13T00:57:02,546 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-13T00:57:02,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. 
Timeout = 60000ms 2024-12-13T00:57:02,547 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:02,551 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-13T00:57:02,552 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:02,552 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-13T00:57:02,565 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:02,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:02,575 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:02,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:02,583 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-13T00:57:02,584 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,584 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:02,601 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-13T00:57:02,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051422601 (current time:1734051422601). 
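At this point the regions are assigned, rows have been written (note the "with WAL disabled" warnings above), and the master has received a snapshot request for snaptb-testExportWithResetTtl with type=FLUSH and ttl=100000. A minimal sketch of issuing an equivalent flush snapshot through the Admin API follows; the simple overload used here takes a FLUSH-type snapshot of an enabled table, and the TTL seen in the request would have to be supplied separately through snapshot properties, which this sketch deliberately omits:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes
      // (the pid=46 procedure traced in the entries that follow).
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
    }
  }
}

The subsequent log entries show the master-side states that such a request passes through: SNAPSHOT_PREPARE, SNAPSHOT_WRITE_SNAPSHOT_INFO, per-region SnapshotRegionProcedure flushes, manifest consolidation, verification, and the final move of the snapshot out of the .tmp directory.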
2024-12-13T00:57:02,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-13T00:57:02,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:02,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e115246 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44feba3d 2024-12-13T00:57:02,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28d6e37c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:02,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:02,612 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:02,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e115246 to 127.0.0.1:49424 2024-12-13T00:57:02,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69e6aff6 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58f8dfa5 2024-12-13T00:57:02,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@472a1ac5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:02,623 DEBUG [hconnection-0x30a7c9fd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:02,624 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:02,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69e6aff6 to 127.0.0.1:49424 2024-12-13T00:57:02,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:02,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-13T00:57:02,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-13T00:57:02,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-13T00:57:02,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-13T00:57:02,631 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T00:57:02,632 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:02,635 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741914_1090 (size=143) 2024-12-13T00:57:02,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741914_1090 (size=143) 2024-12-13T00:57:02,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741914_1090 (size=143) 2024-12-13T00:57:02,648 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:02,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure cef781983bab36286c8a20673ce80a17}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8e8891b618f49dd148e2915558bb2c48}] 2024-12-13T00:57:02,654 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,654 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T00:57:02,806 DEBUG 
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:02,806 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:02,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-13T00:57:02,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-13T00:57:02,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing cef781983bab36286c8a20673ce80a17 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-13T00:57:02,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:02,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 8e8891b618f49dd148e2915558bb2c48 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-13T00:57:02,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/.tmp/cf/acebf318d211486eb005773b4460689a is 71, key is 181383bd3be1cdab05d07275916c95f1/cf:q/1734051422578/Put/seqid=0 2024-12-13T00:57:02,836 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/.tmp/cf/bd15b9795cf148df969c01a7ceeae478 is 71, key is 03786e6649bb4e23297024e18b30264b/cf:q/1734051422566/Put/seqid=0 2024-12-13T00:57:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741915_1091 (size=8188) 2024-12-13T00:57:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741915_1091 (size=8188) 2024-12-13T00:57:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741915_1091 (size=8188) 2024-12-13T00:57:02,852 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/.tmp/cf/acebf318d211486eb005773b4460689a 2024-12-13T00:57:02,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741916_1092 (size=5424) 2024-12-13T00:57:02,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741916_1092 (size=5424) 2024-12-13T00:57:02,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741916_1092 (size=5424) 2024-12-13T00:57:02,873 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/.tmp/cf/bd15b9795cf148df969c01a7ceeae478 2024-12-13T00:57:02,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/.tmp/cf/bd15b9795cf148df969c01a7ceeae478 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478 2024-12-13T00:57:02,897 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478, entries=5, sequenceid=5, filesize=5.3 K 2024-12-13T00:57:02,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for cef781983bab36286c8a20673ce80a17 in 91ms, sequenceid=5, compaction requested=false 2024-12-13T00:57:02,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/.tmp/cf/acebf318d211486eb005773b4460689a as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for cef781983bab36286c8a20673ce80a17: 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. for snaptb-testExportWithResetTtl completed. 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478] hfiles 2024-12-13T00:57:02,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478 for snapshot=snaptb-testExportWithResetTtl 2024-12-13T00:57:02,913 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a, entries=45, sequenceid=5, filesize=8.0 K 2024-12-13T00:57:02,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 8e8891b618f49dd148e2915558bb2c48 in 108ms, sequenceid=5, compaction requested=false 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 8e8891b618f49dd148e2915558bb2c48: 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. for snaptb-testExportWithResetTtl completed. 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a] hfiles 2024-12-13T00:57:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a for snapshot=snaptb-testExportWithResetTtl 2024-12-13T00:57:02,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T00:57:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741918_1094 (size=100) 2024-12-13T00:57:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741918_1094 (size=100) 2024-12-13T00:57:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741918_1094 (size=100) 2024-12-13T00:57:02,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 
2024-12-13T00:57:02,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-13T00:57:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-13T00:57:02,968 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,968 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:02,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 8e8891b618f49dd148e2915558bb2c48 in 321 msec 2024-12-13T00:57:02,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741917_1093 (size=100) 2024-12-13T00:57:02,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741917_1093 (size=100) 2024-12-13T00:57:02,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741917_1093 (size=100) 2024-12-13T00:57:02,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:02,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-13T00:57:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-13T00:57:02,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,981 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:02,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-13T00:57:02,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure cef781983bab36286c8a20673ce80a17 in 334 msec 2024-12-13T00:57:02,989 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:02,990 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:02,991 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:02,991 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-13T00:57:02,993 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T00:57:03,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741919_1095 (size=600) 2024-12-13T00:57:03,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741919_1095 (size=600) 2024-12-13T00:57:03,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741919_1095 (size=600) 2024-12-13T00:57:03,019 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:03,031 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:03,032 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T00:57:03,034 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:03,034 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-13T00:57:03,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 405 msec 2024-12-13T00:57:03,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-13T00:57:03,237 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: 
default:testExportWithResetTtl, procId: 46 completed 2024-12-13T00:57:03,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257 2024-12-13T00:57:03,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:03,304 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:03,304 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T00:57:03,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:57:03,313 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-13T00:57:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741920_1096 (size=143) 2024-12-13T00:57:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741920_1096 (size=143) 2024-12-13T00:57:03,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741920_1096 (size=143) 2024-12-13T00:57:03,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741921_1097 (size=600) 2024-12-13T00:57:03,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741921_1097 (size=600) 2024-12-13T00:57:03,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741921_1097 (size=600) 2024-12-13T00:57:03,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741922_1098 (size=141) 2024-12-13T00:57:03,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741922_1098 (size=141) 2024-12-13T00:57:03,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741922_1098 (size=141) 2024-12-13T00:57:03,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:03,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:03,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:03,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,033 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0001_000001 (auth:SIMPLE) from 127.0.0.1:41708 2024-12-13T00:57:04,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-15906204517131493884.jar 2024-12-13T00:57:04,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-13T00:57:04,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-13T00:57:04,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-13T00:57:04,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-13T00:57:04,533 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-13T00:57:04,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-7182064506974146907.jar 2024-12-13T00:57:04,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:04,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:57:04,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:57:04,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:57:04,613 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:57:04,613 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:57:04,614 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:57:04,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:57:04,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:57:04,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:57:04,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:57:04,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:57:04,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:57:04,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:04,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:04,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:04,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:04,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:04,619 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:04,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:04,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741923_1099 (size=127628) 2024-12-13T00:57:04,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741923_1099 (size=127628) 2024-12-13T00:57:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741923_1099 (size=127628) 2024-12-13T00:57:04,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T00:57:04,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T00:57:04,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741924_1100 (size=2172137) 2024-12-13T00:57:04,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741925_1101 (size=213228) 2024-12-13T00:57:04,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741925_1101 (size=213228) 2024-12-13T00:57:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741925_1101 (size=213228) 2024-12-13T00:57:04,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741926_1102 (size=451756) 2024-12-13T00:57:04,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741926_1102 (size=451756) 2024-12-13T00:57:04,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741926_1102 (size=451756) 2024-12-13T00:57:04,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741927_1103 (size=1877034) 2024-12-13T00:57:04,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741927_1103 (size=1877034) 2024-12-13T00:57:04,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741927_1103 (size=1877034) 2024-12-13T00:57:04,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741928_1104 (size=533455) 2024-12-13T00:57:04,831 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741928_1104 (size=533455) 2024-12-13T00:57:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741928_1104 (size=533455) 2024-12-13T00:57:04,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741929_1105 (size=7280644) 2024-12-13T00:57:04,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741929_1105 (size=7280644) 2024-12-13T00:57:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741929_1105 (size=7280644) 2024-12-13T00:57:04,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741930_1106 (size=4188619) 2024-12-13T00:57:04,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741930_1106 (size=4188619) 2024-12-13T00:57:04,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741930_1106 (size=4188619) 2024-12-13T00:57:04,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741931_1107 (size=20406) 2024-12-13T00:57:04,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741931_1107 (size=20406) 2024-12-13T00:57:04,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741931_1107 (size=20406) 2024-12-13T00:57:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741932_1108 (size=75495) 2024-12-13T00:57:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741932_1108 (size=75495) 2024-12-13T00:57:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741932_1108 (size=75495) 2024-12-13T00:57:04,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741933_1109 (size=45609) 2024-12-13T00:57:04,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741933_1109 (size=45609) 2024-12-13T00:57:04,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741933_1109 (size=45609) 2024-12-13T00:57:05,230 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:57:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741934_1110 (size=110084) 2024-12-13T00:57:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741934_1110 (size=110084) 2024-12-13T00:57:05,416 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741934_1110 (size=110084) 2024-12-13T00:57:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741935_1111 (size=1323991) 2024-12-13T00:57:05,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741935_1111 (size=1323991) 2024-12-13T00:57:05,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741935_1111 (size=1323991) 2024-12-13T00:57:05,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741936_1112 (size=23076) 2024-12-13T00:57:05,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741936_1112 (size=23076) 2024-12-13T00:57:05,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741936_1112 (size=23076) 2024-12-13T00:57:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741937_1113 (size=6350924) 2024-12-13T00:57:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741937_1113 (size=6350924) 2024-12-13T00:57:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741937_1113 (size=6350924) 2024-12-13T00:57:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741938_1114 (size=126803) 2024-12-13T00:57:05,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741938_1114 (size=126803) 2024-12-13T00:57:05,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741938_1114 (size=126803) 2024-12-13T00:57:05,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741939_1115 (size=322274) 2024-12-13T00:57:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741939_1115 (size=322274) 2024-12-13T00:57:05,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741939_1115 (size=322274) 2024-12-13T00:57:05,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741940_1116 (size=1832290) 2024-12-13T00:57:05,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741940_1116 (size=1832290) 2024-12-13T00:57:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741940_1116 (size=1832290) 2024-12-13T00:57:05,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741941_1117 (size=30081) 2024-12-13T00:57:05,667 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741941_1117 (size=30081) 2024-12-13T00:57:05,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741941_1117 (size=30081) 2024-12-13T00:57:05,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741942_1118 (size=53616) 2024-12-13T00:57:05,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741942_1118 (size=53616) 2024-12-13T00:57:05,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741942_1118 (size=53616) 2024-12-13T00:57:05,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741943_1119 (size=29229) 2024-12-13T00:57:05,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741943_1119 (size=29229) 2024-12-13T00:57:05,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741943_1119 (size=29229) 2024-12-13T00:57:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741944_1120 (size=169089) 2024-12-13T00:57:05,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741944_1120 (size=169089) 2024-12-13T00:57:05,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741944_1120 (size=169089) 2024-12-13T00:57:05,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741945_1121 (size=5175431) 2024-12-13T00:57:05,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741945_1121 (size=5175431) 2024-12-13T00:57:05,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741945_1121 (size=5175431) 2024-12-13T00:57:05,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741946_1122 (size=136454) 2024-12-13T00:57:05,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741946_1122 (size=136454) 2024-12-13T00:57:05,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741946_1122 (size=136454) 2024-12-13T00:57:05,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741947_1123 (size=907855) 2024-12-13T00:57:05,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741947_1123 (size=907855) 2024-12-13T00:57:05,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741947_1123 (size=907855) 2024-12-13T00:57:05,800 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741948_1124 (size=3317408) 2024-12-13T00:57:05,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741948_1124 (size=3317408) 2024-12-13T00:57:05,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741948_1124 (size=3317408) 2024-12-13T00:57:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741949_1125 (size=503880) 2024-12-13T00:57:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741949_1125 (size=503880) 2024-12-13T00:57:05,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741949_1125 (size=503880) 2024-12-13T00:57:05,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T00:57:05,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T00:57:05,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741950_1126 (size=4695811) 2024-12-13T00:57:05,895 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T00:57:05,899 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-13T00:57:05,903 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:57:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741951_1127 (size=324) 2024-12-13T00:57:05,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741951_1127 (size=324) 2024-12-13T00:57:05,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741951_1127 (size=324) 2024-12-13T00:57:06,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741952_1128 (size=15) 2024-12-13T00:57:06,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741952_1128 (size=15) 2024-12-13T00:57:06,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741952_1128 (size=15) 2024-12-13T00:57:06,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741953_1129 (size=304879) 2024-12-13T00:57:06,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741953_1129 (size=304879) 2024-12-13T00:57:06,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to 
blk_1073741953_1129 (size=304879) 2024-12-13T00:57:06,189 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:57:06,189 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:57:06,475 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0002_000001 (auth:SIMPLE) from 127.0.0.1:50812 2024-12-13T00:57:09,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000001/launch_container.sh] 2024-12-13T00:57:09,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000001/container_tokens] 2024-12-13T00:57:09,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0001/container_1734051393214_0001_01_000001/sysfs] 2024-12-13T00:57:11,034 INFO [master/9812beb1e4cd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T00:57:11,034 INFO [master/9812beb1e4cd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
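The long run of mapreduce.TableMapReduceUtil(923) entries earlier in this stretch shows the export job resolving, for each required class, the jar that provides it, and the addStoredBlock lines that follow are those jars being written into the MiniDFSCluster as job resources. As a minimal illustrative sketch (not the test's own code), this is roughly how a MapReduce job picks up the HBase dependency jars; the job name below is a placeholder:

    // Sketch only: attach HBase dependency jars to a MapReduce job.
    // Assumes HBase and Hadoop are on the classpath; the job name is hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Resolves the jar containing each class the job needs (HConstants,
        // ClientProtos, and so on) and adds it to the job's distributed cache,
        // which is what produces the "For class ..., using jar ..." log lines.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
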
2024-12-13T00:57:13,717 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0002_000001 (auth:SIMPLE) from 127.0.0.1:53154 2024-12-13T00:57:14,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741954_1130 (size=350553) 2024-12-13T00:57:14,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741954_1130 (size=350553) 2024-12-13T00:57:14,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741954_1130 (size=350553) 2024-12-13T00:57:16,070 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0002_000001 (auth:SIMPLE) from 127.0.0.1:35706 2024-12-13T00:57:21,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741955_1131 (size=8188) 2024-12-13T00:57:21,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741955_1131 (size=8188) 2024-12-13T00:57:21,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741955_1131 (size=8188) 2024-12-13T00:57:21,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741956_1132 (size=5424) 2024-12-13T00:57:21,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741956_1132 (size=5424) 2024-12-13T00:57:21,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741956_1132 (size=5424) 2024-12-13T00:57:21,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741957_1133 (size=17398) 2024-12-13T00:57:21,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741957_1133 (size=17398) 2024-12-13T00:57:21,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741957_1133 (size=17398) 2024-12-13T00:57:21,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741958_1134 (size=461) 2024-12-13T00:57:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741958_1134 (size=461) 2024-12-13T00:57:21,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741958_1134 (size=461) 2024-12-13T00:57:21,756 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000002/launch_container.sh] 2024-12-13T00:57:21,756 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000002/container_tokens] 2024-12-13T00:57:21,756 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000002/sysfs] 2024-12-13T00:57:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741959_1135 (size=17398) 2024-12-13T00:57:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741959_1135 (size=17398) 2024-12-13T00:57:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741959_1135 (size=17398) 2024-12-13T00:57:21,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741960_1136 (size=350553) 2024-12-13T00:57:21,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741960_1136 (size=350553) 2024-12-13T00:57:21,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741960_1136 (size=350553) 2024-12-13T00:57:22,761 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T00:57:23,463 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:57:23,464 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
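At this point the ExportSnapshot run driven by TestExportSnapshot is finalizing and re-verifying the exported snapshot (the ExportSnapshot 1207/1218 entries above). For orientation only, a minimal sketch of driving the same tool programmatically follows; it assumes the tool's standard -snapshot/-copy-to/-mappers options, and the target URI is a placeholder rather than the test's export path:

    // Sketch only: run the ExportSnapshot tool from Java. Not the test's code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to the target
        // filesystem, then verifies the exported snapshot, as logged above.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode:8020/export-target",  // placeholder URI
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }
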
2024-12-13T00:57:23,473 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-13T00:57:23,473 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:57:23,474 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:57:23,474 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T00:57:23,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-13T00:57:23,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-13T00:57:23,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-13T00:57:23,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-13T00:57:23,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051423257/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-13T00:57:23,483 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-13T00:57:23,484 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-13T00:57:23,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T00:57:23,487 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051443487"}]},"ts":"1734051443487"} 2024-12-13T00:57:23,489 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-13T00:57:23,492 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 
2024-12-13T00:57:23,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-13T00:57:23,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, UNASSIGN}] 2024-12-13T00:57:23,495 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, UNASSIGN 2024-12-13T00:57:23,496 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, UNASSIGN 2024-12-13T00:57:23,496 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=8e8891b618f49dd148e2915558bb2c48, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:23,497 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=cef781983bab36286c8a20673ce80a17, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:23,498 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:23,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=51, state=RUNNABLE; CloseRegionProcedure cef781983bab36286c8a20673ce80a17, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:57:23,500 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:23,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=52, state=RUNNABLE; CloseRegionProcedure 8e8891b618f49dd148e2915558bb2c48, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:57:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T00:57:23,650 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:23,652 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:23,652 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:23,652 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:23,652 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing cef781983bab36286c8a20673ce80a17, disabling compactions & flushes 
2024-12-13T00:57:23,652 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:23,652 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:23,652 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. after waiting 0 ms 2024-12-13T00:57:23,652 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 2024-12-13T00:57:23,653 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:23,653 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:23,653 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 8e8891b618f49dd148e2915558bb2c48, disabling compactions & flushes 2024-12-13T00:57:23,653 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:23,653 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:23,653 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. after waiting 0 ms 2024-12-13T00:57:23,653 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 2024-12-13T00:57:23,676 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T00:57:23,677 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:23,677 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17. 
2024-12-13T00:57:23,677 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for cef781983bab36286c8a20673ce80a17: 2024-12-13T00:57:23,680 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:23,680 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=cef781983bab36286c8a20673ce80a17, regionState=CLOSED 2024-12-13T00:57:23,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=51 2024-12-13T00:57:23,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=51, state=SUCCESS; CloseRegionProcedure cef781983bab36286c8a20673ce80a17, server=9812beb1e4cd,45231,1734051384968 in 184 msec 2024-12-13T00:57:23,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=cef781983bab36286c8a20673ce80a17, UNASSIGN in 191 msec 2024-12-13T00:57:23,702 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T00:57:23,707 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:23,707 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48. 
2024-12-13T00:57:23,707 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 8e8891b618f49dd148e2915558bb2c48: 2024-12-13T00:57:23,710 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:23,710 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=8e8891b618f49dd148e2915558bb2c48, regionState=CLOSED 2024-12-13T00:57:23,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=52 2024-12-13T00:57:23,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=52, state=SUCCESS; CloseRegionProcedure 8e8891b618f49dd148e2915558bb2c48, server=9812beb1e4cd,46075,1734051384875 in 212 msec 2024-12-13T00:57:23,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-13T00:57:23,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=8e8891b618f49dd148e2915558bb2c48, UNASSIGN in 226 msec 2024-12-13T00:57:23,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-13T00:57:23,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 230 msec 2024-12-13T00:57:23,725 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051443725"}]},"ts":"1734051443725"} 2024-12-13T00:57:23,727 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-13T00:57:23,729 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-13T00:57:23,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 246 msec 2024-12-13T00:57:23,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-13T00:57:23,790 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-13T00:57:23,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-13T00:57:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,793 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-13T00:57:23,796 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-13T00:57:23,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,802 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T00:57:23,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T00:57:23,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T00:57:23,804 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-13T00:57:23,809 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:23,809 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-13T00:57:23,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:23,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:23,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:23,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:23,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:23,815 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/recovered.edits] 2024-12-13T00:57:23,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T00:57:23,816 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/recovered.edits] 2024-12-13T00:57:23,823 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/cf/acebf318d211486eb005773b4460689a 2024-12-13T00:57:23,825 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/cf/bd15b9795cf148df969c01a7ceeae478 2024-12-13T00:57:23,827 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/recovered.edits/8.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48/recovered.edits/8.seqid 2024-12-13T00:57:23,829 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/8e8891b618f49dd148e2915558bb2c48 2024-12-13T00:57:23,830 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/recovered.edits/8.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17/recovered.edits/8.seqid 2024-12-13T00:57:23,831 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportWithResetTtl/cef781983bab36286c8a20673ce80a17 2024-12-13T00:57:23,831 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-13T00:57:23,835 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,838 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-13T00:57:23,842 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-13T00:57:23,848 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,848 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
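The DisableTableProcedure (pid=49) and DeleteTableProcedure (pid=55) chains above, with their region closes, HFile archiving, and hbase:meta cleanup, are what the master executes when a client disables and then deletes a table. A minimal client-side sketch of the calls that trigger them, assuming a reachable cluster via the default configuration:

    // Sketch only: client-side disable + delete of the table seen in the log.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportWithResetTtl");
          admin.disableTable(tn);  // master runs a DisableTableProcedure (closes regions)
          admin.deleteTable(tn);   // master runs a DeleteTableProcedure (archives files, cleans meta)
        }
      }
    }
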
2024-12-13T00:57:23,848 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051443848"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:23,848 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051443848"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:23,853 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:57:23,853 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cef781983bab36286c8a20673ce80a17, NAME => 'testExportWithResetTtl,,1734051421936.cef781983bab36286c8a20673ce80a17.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8e8891b618f49dd148e2915558bb2c48, NAME => 'testExportWithResetTtl,1,1734051421936.8e8891b618f49dd148e2915558bb2c48.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:57:23,853 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-13T00:57:23,853 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051443853"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:23,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-13T00:57:23,861 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-13T00:57:23,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 70 msec 2024-12-13T00:57:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-13T00:57:23,918 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-13T00:57:23,919 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-13T00:57:23,919 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-13T00:57:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:23,923 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051443923"}]},"ts":"1734051443923"} 2024-12-13T00:57:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T00:57:23,925 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-13T00:57:23,927 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithResetTtl to state=DISABLING 2024-12-13T00:57:23,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-13T00:57:23,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, UNASSIGN}] 2024-12-13T00:57:23,930 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, UNASSIGN 2024-12-13T00:57:23,931 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, UNASSIGN 2024-12-13T00:57:23,931 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=30980404399d8ae8b8559ed36da41deb, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:23,932 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=2b5681655899e82ccd476ae23464a76f, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:23,933 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:23,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure 30980404399d8ae8b8559ed36da41deb, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:57:23,934 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:23,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure 2b5681655899e82ccd476ae23464a76f, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:57:24,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T00:57:24,087 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:24,088 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:24,088 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:24,088 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing 30980404399d8ae8b8559ed36da41deb, disabling compactions & flushes 2024-12-13T00:57:24,088 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:24,088 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:24,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:24,088 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. after waiting 0 ms 2024-12-13T00:57:24,088 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:24,089 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:24,089 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:24,089 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 2b5681655899e82ccd476ae23464a76f, disabling compactions & flushes 2024-12-13T00:57:24,089 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:24,089 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:24,090 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. after waiting 0 ms 2024-12-13T00:57:24,090 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 
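The pid=56 DisableTableProcedure above fans out into a CloseTableRegionsProcedure (pid=57), per-region TransitRegionStateProcedures (pid=58/59) and CloseRegionProcedures (pid=60/61), which the region servers execute as the HRegion close steps just logged. On the client side this whole chain is triggered by a single Admin call; a minimal sketch using the standard HBase client API (configuration and error handling are placeholders, not taken from the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
          // Blocks until the master's DisableTableProcedure and its region-close
          // subprocedures (as in the log above) have finished.
          admin.disableTable(table);
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }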
2024-12-13T00:57:24,101 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:57:24,102 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:57:24,102 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:24,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:24,103 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb. 2024-12-13T00:57:24,103 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f. 2024-12-13T00:57:24,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 30980404399d8ae8b8559ed36da41deb: 2024-12-13T00:57:24,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 2b5681655899e82ccd476ae23464a76f: 2024-12-13T00:57:24,105 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:24,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=30980404399d8ae8b8559ed36da41deb, regionState=CLOSED 2024-12-13T00:57:24,106 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:24,107 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=2b5681655899e82ccd476ae23464a76f, regionState=CLOSED 2024-12-13T00:57:24,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-13T00:57:24,110 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58 2024-12-13T00:57:24,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure 30980404399d8ae8b8559ed36da41deb, server=9812beb1e4cd,45231,1734051384968 in 174 msec 2024-12-13T00:57:24,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=30980404399d8ae8b8559ed36da41deb, UNASSIGN in 181 msec 2024-12-13T00:57:24,111 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure 2b5681655899e82ccd476ae23464a76f, server=9812beb1e4cd,40687,1734051385054 in 173 msec 2024-12-13T00:57:24,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-13T00:57:24,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2b5681655899e82ccd476ae23464a76f, UNASSIGN in 181 msec 2024-12-13T00:57:24,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-13T00:57:24,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 185 msec 2024-12-13T00:57:24,116 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051444116"}]},"ts":"1734051444116"} 2024-12-13T00:57:24,117 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-13T00:57:24,119 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-13T00:57:24,121 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 201 msec 2024-12-13T00:57:24,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-13T00:57:24,227 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-13T00:57:24,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-13T00:57:24,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,229 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-13T00:57:24,230 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-13T00:57:24,234 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:24,234 DEBUG [HFileArchiver-6 
{}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:24,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T00:57:24,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T00:57:24,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T00:57:24,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-13T00:57:24,237 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/recovered.edits] 2024-12-13T00:57:24,238 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/recovered.edits] 2024-12-13T00:57:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,238 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-13T00:57:24,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-13T00:57:24,243 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/cf/01cd4e65725d425cb7c2a3efffb5fef0 2024-12-13T00:57:24,244 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/cf/f3a4129e7981480b9ae55058dba51c65 2024-12-13T00:57:24,248 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb/recovered.edits/9.seqid 2024-12-13T00:57:24,248 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/30980404399d8ae8b8559ed36da41deb 2024-12-13T00:57:24,248 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f/recovered.edits/9.seqid 2024-12-13T00:57:24,249 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithResetTtl/2b5681655899e82ccd476ae23464a76f 2024-12-13T00:57:24,249 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-13T00:57:24,252 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,254 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-13T00:57:24,257 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-13T00:57:24,259 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,259 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-13T00:57:24,259 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051444259"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:24,259 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051444259"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:24,264 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:57:24,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2b5681655899e82ccd476ae23464a76f, NAME => 'testtb-testExportWithResetTtl,,1734051420283.2b5681655899e82ccd476ae23464a76f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 30980404399d8ae8b8559ed36da41deb, NAME => 'testtb-testExportWithResetTtl,1,1734051420283.30980404399d8ae8b8559ed36da41deb.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:57:24,264 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 
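With the table disabled, the pid=62 DeleteTableProcedure above archives the region directories, removes the region rows from hbase:meta, and drops the table descriptor; the snapshot deletions requested right after it (the 'delete name: ...' entries below) are separate SnapshotManager requests. A client-side sketch of the equivalent Admin calls, illustrative only and not the test's actual code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class DropTableSketch {
      // Drop the already-disabled table, then remove the snapshots named in the log.
      static void dropTableAndSnapshots(Admin admin) throws IOException {
        admin.deleteTable(TableName.valueOf("testtb-testExportWithResetTtl")); // DeleteTableProcedure on the master
        admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
      }
    }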
2024-12-13T00:57:24,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051444264"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:24,266 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-13T00:57:24,269 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-13T00:57:24,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 41 msec 2024-12-13T00:57:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-13T00:57:24,342 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-13T00:57:24,352 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-13T00:57:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-13T00:57:24,356 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-13T00:57:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-13T00:57:24,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-13T00:57:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-13T00:57:24,389 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=783 (was 769) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:59764 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25650) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:42042 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2015 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:36279 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:41618 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:39373 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_321803027_1 at /127.0.0.1:36290 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36279 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39373 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=780 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=592 (was 518) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 20), AvailableMemoryMB=4235 (was 4691) 2024-12-13T00:57:24,389 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-13T00:57:24,409 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=783, OpenFileDescriptor=780, MaxFileDescriptor=1048576, SystemLoadAverage=592, ProcessCount=20, AvailableMemoryMB=4234 2024-12-13T00:57:24,410 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-13T00:57:24,412 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:57:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:24,414 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:57:24,414 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:24,415 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-13T00:57:24,415 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:57:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T00:57:24,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741961_1137 (size=407) 2024-12-13T00:57:24,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741961_1137 (size=407) 2024-12-13T00:57:24,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741961_1137 (size=407) 2024-12-13T00:57:24,445 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a63180885f4213bf0dc326227a9fd1b4, NAME => 'testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:24,445 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => ec0b71eb23afa1c8964152a729ede4eb, NAME => 'testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741962_1138 (size=68) 2024-12-13T00:57:24,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741962_1138 (size=68) 2024-12-13T00:57:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741962_1138 (size=68) 2024-12-13T00:57:24,471 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:24,471 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing ec0b71eb23afa1c8964152a729ede4eb, disabling compactions & flushes 2024-12-13T00:57:24,471 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:24,471 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:24,472 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. after waiting 0 ms 2024-12-13T00:57:24,472 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:24,472 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 
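The entries above record the client request that creates 'testtb-testExportFileSystemState' with a single column family 'cf' and a split point at '1', which is why the RegionOpenAndInit pool initializes the two regions a63180885f4213bf0dc326227a9fd1b4 ('' to '1') and ec0b71eb23afa1c8964152a729ede4eb ('1' to ''). As a hedged illustration only (this code is not part of the test log; the connection setup and the reduced set of attributes are assumptions), an equivalent table could be created through the HBase 2.x Admin API roughly as follows:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setRegionReplication(1)                                        // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                          // VERSIONS => '1'
              .setBlocksize(65536)                                        // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // A single split key at '1' reproduces the two regions seen in the log: ('', '1') and ('1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}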
2024-12-13T00:57:24,472 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for ec0b71eb23afa1c8964152a729ede4eb: 2024-12-13T00:57:24,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741963_1139 (size=68) 2024-12-13T00:57:24,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741963_1139 (size=68) 2024-12-13T00:57:24,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741963_1139 (size=68) 2024-12-13T00:57:24,479 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:24,480 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing a63180885f4213bf0dc326227a9fd1b4, disabling compactions & flushes 2024-12-13T00:57:24,480 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:24,480 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:24,480 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. after waiting 0 ms 2024-12-13T00:57:24,480 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:24,480 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
2024-12-13T00:57:24,480 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for a63180885f4213bf0dc326227a9fd1b4: 2024-12-13T00:57:24,481 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:57:24,482 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734051444481"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051444481"}]},"ts":"1734051444481"} 2024-12-13T00:57:24,482 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734051444481"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051444481"}]},"ts":"1734051444481"} 2024-12-13T00:57:24,484 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:57:24,485 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:57:24,485 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051444485"}]},"ts":"1734051444485"} 2024-12-13T00:57:24,487 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-13T00:57:24,491 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:57:24,493 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:57:24,493 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:57:24,493 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:57:24,493 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:57:24,493 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:57:24,493 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:57:24,493 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:57:24,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, ASSIGN}] 2024-12-13T00:57:24,494 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, ASSIGN 2024-12-13T00:57:24,494 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, ASSIGN 2024-12-13T00:57:24,495 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:57:24,495 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:57:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T00:57:24,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-13T00:57:24,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-13T00:57:24,646 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:57:24,647 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=a63180885f4213bf0dc326227a9fd1b4, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:24,647 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=ec0b71eb23afa1c8964152a729ede4eb, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:24,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure ec0b71eb23afa1c8964152a729ede4eb, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:57:24,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE; OpenRegionProcedure a63180885f4213bf0dc326227a9fd1b4, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:57:24,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T00:57:24,805 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:24,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:24,809 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 
2024-12-13T00:57:24,809 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => ec0b71eb23afa1c8964152a729ede4eb, NAME => 'testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:57:24,810 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. service=AccessControlService 2024-12-13T00:57:24,810 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:57:24,810 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,810 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:24,811 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,811 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,811 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:24,811 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => a63180885f4213bf0dc326227a9fd1b4, NAME => 'testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:57:24,811 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. service=AccessControlService 2024-12-13T00:57:24,811 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:57:24,811 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,812 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:24,812 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,812 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,812 INFO [StoreOpener-ec0b71eb23afa1c8964152a729ede4eb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,813 INFO [StoreOpener-a63180885f4213bf0dc326227a9fd1b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,814 INFO [StoreOpener-ec0b71eb23afa1c8964152a729ede4eb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec0b71eb23afa1c8964152a729ede4eb columnFamilyName cf 2024-12-13T00:57:24,814 DEBUG [StoreOpener-ec0b71eb23afa1c8964152a729ede4eb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:24,814 INFO [StoreOpener-ec0b71eb23afa1c8964152a729ede4eb-1 {}] regionserver.HStore(327): Store=ec0b71eb23afa1c8964152a729ede4eb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:24,815 INFO [StoreOpener-a63180885f4213bf0dc326227a9fd1b4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a63180885f4213bf0dc326227a9fd1b4 columnFamilyName cf 2024-12-13T00:57:24,815 DEBUG [StoreOpener-a63180885f4213bf0dc326227a9fd1b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:24,815 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,816 INFO [StoreOpener-a63180885f4213bf0dc326227a9fd1b4-1 {}] regionserver.HStore(327): Store=a63180885f4213bf0dc326227a9fd1b4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:24,816 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,817 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,817 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,818 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:24,820 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:24,821 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:24,821 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened ec0b71eb23afa1c8964152a729ede4eb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63632385, jitterRate=-0.051803573966026306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:24,822 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(1001): Region open journal for ec0b71eb23afa1c8964152a729ede4eb: 2024-12-13T00:57:24,822 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:24,823 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb., pid=66, masterSystemTime=1734051444805 2024-12-13T00:57:24,823 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened a63180885f4213bf0dc326227a9fd1b4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66980649, jitterRate=-0.0019105523824691772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:24,823 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for a63180885f4213bf0dc326227a9fd1b4: 2024-12-13T00:57:24,824 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4., pid=67, masterSystemTime=1734051444807 2024-12-13T00:57:24,824 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:24,824 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:24,825 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=ec0b71eb23afa1c8964152a729ede4eb, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:24,825 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:24,825 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
2024-12-13T00:57:24,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=a63180885f4213bf0dc326227a9fd1b4, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:24,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-13T00:57:24,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=64 2024-12-13T00:57:24,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure ec0b71eb23afa1c8964152a729ede4eb, server=9812beb1e4cd,40687,1734051385054 in 175 msec 2024-12-13T00:57:24,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=64, state=SUCCESS; OpenRegionProcedure a63180885f4213bf0dc326227a9fd1b4, server=9812beb1e4cd,46075,1734051384875 in 174 msec 2024-12-13T00:57:24,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, ASSIGN in 336 msec 2024-12-13T00:57:24,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-13T00:57:24,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, ASSIGN in 336 msec 2024-12-13T00:57:24,832 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:57:24,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051444832"}]},"ts":"1734051444832"} 2024-12-13T00:57:24,833 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-13T00:57:24,836 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:57:24,836 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-13T00:57:24,838 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T00:57:24,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:24,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:24,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:24,842 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:24,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:24,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 430 msec 2024-12-13T00:57:25,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-13T00:57:25,023 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-13T00:57:25,023 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-13T00:57:25,023 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:25,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-13T00:57:25,028 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:25,028 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-13T00:57:25,032 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:57:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051445032 (current time:1734051445032). 
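The PermissionStorage and ZKPermissionWatcher entries above show the table owner 'jenkins' receiving RWXCA (read, write, exec, create, admin) on the new table during the CREATE_TABLE_POST_OPERATION step, and that ACL being propagated to each region server via the /hbase/acl znode. For illustration only (in the log this grant happens implicitly as part of table creation; the explicit client call and connection setup below are assumptions), an equivalent table-level grant could be issued with the AccessControlClient helper:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // A null family and qualifier make the grant table-wide, matching the
      // "testtb-testExportFileSystemState jenkins: RWXCA" entry read back in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemState"), "jenkins",
          null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}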
2024-12-13T00:57:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-13T00:57:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:25,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b18f7c2 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4989b5d8 2024-12-13T00:57:25,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b277f9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:25,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:25,040 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b18f7c2 to 127.0.0.1:49424 2024-12-13T00:57:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:25,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7e8101 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@332ff3d5 2024-12-13T00:57:25,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e4a9ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:25,048 DEBUG [hconnection-0x2dac731a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:25,049 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33914, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:25,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7e8101 to 127.0.0.1:49424 2024-12-13T00:57:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-12-13T00:57:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:57:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-13T00:57:25,055 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:25,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T00:57:25,056 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:25,059 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:25,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741964_1140 (size=170) 2024-12-13T00:57:25,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741964_1140 (size=170) 2024-12-13T00:57:25,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741964_1140 (size=170) 2024-12-13T00:57:25,073 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:25,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb}] 2024-12-13T00:57:25,074 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:25,074 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; 
SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T00:57:25,225 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:25,225 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:25,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-13T00:57:25,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-13T00:57:25,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for a63180885f4213bf0dc326227a9fd1b4: 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. for emptySnaptb0-testExportFileSystemState completed. 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for ec0b71eb23afa1c8964152a729ede4eb: 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. for emptySnaptb0-testExportFileSystemState completed. 
2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:25,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:25,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:25,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741966_1142 (size=71) 2024-12-13T00:57:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741966_1142 (size=71) 2024-12-13T00:57:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741966_1142 (size=71) 2024-12-13T00:57:25,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:25,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-13T00:57:25,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-13T00:57:25,237 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:25,237 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:25,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb in 165 msec 2024-12-13T00:57:25,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741965_1141 (size=71) 2024-12-13T00:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741965_1141 (size=71) 2024-12-13T00:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741965_1141 (size=71) 2024-12-13T00:57:25,243 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
2024-12-13T00:57:25,243 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-13T00:57:25,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-13T00:57:25,243 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,243 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-13T00:57:25,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4 in 171 msec 2024-12-13T00:57:25,246 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:25,246 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:25,247 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:25,247 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:25,248 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:25,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741967_1143 (size=552) 2024-12-13T00:57:25,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741967_1143 (size=552) 2024-12-13T00:57:25,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741967_1143 (size=552) 2024-12-13T00:57:25,265 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:25,270 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:25,271 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:25,272 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:25,273 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-13T00:57:25,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 219 msec 2024-12-13T00:57:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-13T00:57:25,359 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-13T00:57:25,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:25,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:25,371 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-13T00:57:25,371 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:25,371 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:25,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:57:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051445388 (current time:1734051445388). 
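The two warnings at 00:57:25,366 and 00:57:25,368 above come from the test writing rows into both regions with the write-ahead log turned off. As a minimal, hedged sketch (not the test's actual code; the connection setup, placeholder value, and single put are assumptions), one way a client triggers exactly that warning is by setting SKIP_WAL durability on a Put:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Row key and qualifier mirror the cell flushed later in the log
      // (key is 05a7044c5acd32771e91928797759bef/cf:q); the value is a placeholder.
      Put put = new Put(Bytes.toBytes("05a7044c5acd32771e91928797759bef"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // produces the "WAL disabled" warning server-side
      table.put(put);
    }
  }
}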
2024-12-13T00:57:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-13T00:57:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ec1bae5 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@603b3884 2024-12-13T00:57:25,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1819cf05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:25,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:25,395 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33916, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ec1bae5 to 127.0.0.1:49424 2024-12-13T00:57:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60606087 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59269b17 2024-12-13T00:57:25,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f9116d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:25,403 DEBUG [hconnection-0x63cc0a15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:25,404 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60606087 to 127.0.0.1:49424 2024-12-13T00:57:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
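Both snapshot requests in the log are FLUSH-type snapshots of the same table: 'emptySnaptb0-testExportFileSystemState', taken before any data was written (its manifest lists no hfiles), and 'snaptb0-testExportFileSystemState', requested at 00:57:25,388 after the puts. A minimal client-side sketch, assuming a default connection (illustrative only, not the test's own code), would be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeExportTestSnapshots {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Taken while the table is still empty.
      admin.snapshot("emptySnaptb0-testExportFileSystemState", table);
      // Taken again after rows have been written to both regions.
      admin.snapshot("snaptb0-testExportFileSystemState", table);
    }
  }
}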
2024-12-13T00:57:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:57:25,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-13T00:57:25,410 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:25,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T00:57:25,411 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:25,413 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:25,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741968_1144 (size=165) 2024-12-13T00:57:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741968_1144 (size=165) 2024-12-13T00:57:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741968_1144 (size=165) 2024-12-13T00:57:25,431 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:25,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb}] 2024-12-13T00:57:25,432 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:25,432 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 
a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T00:57:25,583 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:25,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:25,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-13T00:57:25,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-13T00:57:25,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:25,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:25,584 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing ec0b71eb23afa1c8964152a729ede4eb 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-13T00:57:25,584 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing a63180885f4213bf0dc326227a9fd1b4 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-13T00:57:25,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/.tmp/cf/1a9da160f3e8431d9edf07909e6b9c27 is 71, key is 05a7044c5acd32771e91928797759bef/cf:q/1734051445366/Put/seqid=0 2024-12-13T00:57:25,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/.tmp/cf/26d71a1656604b5b875ecf147a09f11e is 71, key is 118bf9da5ddb3e27de912e470568c2bc/cf:q/1734051445368/Put/seqid=0 2024-12-13T00:57:25,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741969_1145 (size=5216) 2024-12-13T00:57:25,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741969_1145 (size=5216) 2024-12-13T00:57:25,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741969_1145 (size=5216) 2024-12-13T00:57:25,612 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741970_1146 (size=8392) 2024-12-13T00:57:25,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741970_1146 (size=8392) 2024-12-13T00:57:25,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741970_1146 (size=8392) 2024-12-13T00:57:25,613 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/.tmp/cf/1a9da160f3e8431d9edf07909e6b9c27 2024-12-13T00:57:25,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/.tmp/cf/1a9da160f3e8431d9edf07909e6b9c27 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27 2024-12-13T00:57:25,624 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27, entries=2, sequenceid=6, filesize=5.1 K 2024-12-13T00:57:25,628 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for a63180885f4213bf0dc326227a9fd1b4 in 44ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:25,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for a63180885f4213bf0dc326227a9fd1b4: 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. for snaptb0-testExportFileSystemState completed. 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27] hfiles 2024-12-13T00:57:25,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27 for snapshot=snaptb0-testExportFileSystemState 2024-12-13T00:57:25,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741971_1147 (size=110) 2024-12-13T00:57:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741971_1147 (size=110) 2024-12-13T00:57:25,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741971_1147 (size=110) 2024-12-13T00:57:25,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
2024-12-13T00:57:25,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-13T00:57:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-13T00:57:25,657 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,658 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:25,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure a63180885f4213bf0dc326227a9fd1b4 in 227 msec 2024-12-13T00:57:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T00:57:26,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/.tmp/cf/26d71a1656604b5b875ecf147a09f11e 2024-12-13T00:57:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T00:57:26,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/.tmp/cf/26d71a1656604b5b875ecf147a09f11e as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e 2024-12-13T00:57:26,029 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e, entries=48, sequenceid=6, filesize=8.2 K 2024-12-13T00:57:26,030 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ec0b71eb23afa1c8964152a729ede4eb in 446ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:26,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for ec0b71eb23afa1c8964152a729ede4eb: 2024-12-13T00:57:26,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 
for snaptb0-testExportFileSystemState completed. 2024-12-13T00:57:26,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-13T00:57:26,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:26,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e] hfiles 2024-12-13T00:57:26,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e for snapshot=snaptb0-testExportFileSystemState 2024-12-13T00:57:26,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741972_1148 (size=110) 2024-12-13T00:57:26,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741972_1148 (size=110) 2024-12-13T00:57:26,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741972_1148 (size=110) 2024-12-13T00:57:26,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 
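Because the snapshot is FLUSH-type, each SnapshotRegionProcedure first flushes the region's memstore (132 B for a63180885f4213bf0dc326227a9fd1b4, ~3.13 KB for ec0b71eb23afa1c8964152a729ede4eb) and then references the resulting hfile in the manifest. The flush here is driven by the procedure itself; a manual equivalent through the Admin API would look like this hedged sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table to hfiles, the same per-region effect the
      // SnapshotRegionCallable achieves before taking file references.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}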
2024-12-13T00:57:26,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-13T00:57:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-13T00:57:26,052 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:26,052 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:26,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=71 2024-12-13T00:57:26,056 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:26,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure ec0b71eb23afa1c8964152a729ede4eb in 622 msec 2024-12-13T00:57:26,056 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:26,057 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:26,057 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-13T00:57:26,058 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T00:57:26,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741973_1149 (size=630) 2024-12-13T00:57:26,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741973_1149 (size=630) 2024-12-13T00:57:26,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741973_1149 (size=630) 2024-12-13T00:57:26,077 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:26,084 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:26,084 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T00:57:26,086 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:26,086 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-13T00:57:26,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 678 msec 2024-12-13T00:57:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-13T00:57:26,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-13T00:57:26,516 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516 2024-12-13T00:57:26,516 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:26,563 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:26,563 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T00:57:26,566 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
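The TestExportSnapshot entries above set up the same copy the standalone ExportSnapshot tool performs: read the source snapshot under .hbase-snapshot and write it to the export root. The snapshot name and destination URI below are taken from the log; driving the tool through Hadoop's ToolRunner (equivalent to the documented hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot command line) is assumed usage, not something the test log shows directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testExportFileSystemState \
    //       -copy-to <export destination from the log>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:46775/user/jenkins/test-data/"
            + "6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516"
    });
    System.exit(rc);
  }
}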
2024-12-13T00:57:26,574 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-13T00:57:26,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741975_1151 (size=630) 2024-12-13T00:57:26,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741975_1151 (size=630) 2024-12-13T00:57:26,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741975_1151 (size=630) 2024-12-13T00:57:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741974_1150 (size=165) 2024-12-13T00:57:26,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741974_1150 (size=165) 2024-12-13T00:57:26,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741974_1150 (size=165) 2024-12-13T00:57:26,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:26,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:26,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:26,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-10771524566973845093.jar 2024-12-13T00:57:27,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,956 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1734051393214_0002_000001 (auth:SIMPLE) from 127.0.0.1:60428 2024-12-13T00:57:27,956 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-12627871521382813035.jar 2024-12-13T00:57:27,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:27,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:57:27,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:57:27,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:57:27,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:57:27,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:57:27,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:57:27,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:57:27,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:57:27,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:57:27,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:57:27,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:57:27,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:57:27,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:27,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:27,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:27,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:27,964 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:27,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:27,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:27,970 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000001/launch_container.sh] 2024-12-13T00:57:27,971 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000001/container_tokens] 2024-12-13T00:57:27,971 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0002/container_1734051393214_0002_01_000001/sysfs] 2024-12-13T00:57:28,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741976_1152 (size=127628) 2024-12-13T00:57:28,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741976_1152 (size=127628) 2024-12-13T00:57:28,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741976_1152 (size=127628) 2024-12-13T00:57:28,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T00:57:28,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T00:57:28,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741977_1153 (size=2172137) 2024-12-13T00:57:28,067 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741978_1154 (size=213228) 2024-12-13T00:57:28,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741978_1154 (size=213228) 2024-12-13T00:57:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741978_1154 (size=213228) 2024-12-13T00:57:28,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T00:57:28,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T00:57:28,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741979_1155 (size=1877034) 2024-12-13T00:57:28,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741980_1156 (size=533455) 2024-12-13T00:57:28,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741980_1156 (size=533455) 2024-12-13T00:57:28,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741980_1156 (size=533455) 2024-12-13T00:57:28,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741981_1157 (size=7280644) 2024-12-13T00:57:28,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741981_1157 (size=7280644) 2024-12-13T00:57:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741981_1157 (size=7280644) 2024-12-13T00:57:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741982_1158 (size=4188619) 2024-12-13T00:57:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741982_1158 (size=4188619) 2024-12-13T00:57:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741982_1158 (size=4188619) 2024-12-13T00:57:28,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741983_1159 (size=20406) 2024-12-13T00:57:28,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741983_1159 (size=20406) 2024-12-13T00:57:28,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741983_1159 (size=20406) 2024-12-13T00:57:28,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741984_1160 (size=75495) 2024-12-13T00:57:28,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741984_1160 (size=75495) 2024-12-13T00:57:28,203 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741984_1160 (size=75495) 2024-12-13T00:57:28,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741985_1161 (size=45609) 2024-12-13T00:57:28,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741985_1161 (size=45609) 2024-12-13T00:57:28,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741985_1161 (size=45609) 2024-12-13T00:57:28,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741986_1162 (size=110084) 2024-12-13T00:57:28,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741986_1162 (size=110084) 2024-12-13T00:57:28,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741986_1162 (size=110084) 2024-12-13T00:57:28,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741987_1163 (size=1323991) 2024-12-13T00:57:28,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741987_1163 (size=1323991) 2024-12-13T00:57:28,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741987_1163 (size=1323991) 2024-12-13T00:57:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741988_1164 (size=23076) 2024-12-13T00:57:28,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741988_1164 (size=23076) 2024-12-13T00:57:28,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741988_1164 (size=23076) 2024-12-13T00:57:28,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741989_1165 (size=126803) 2024-12-13T00:57:28,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741989_1165 (size=126803) 2024-12-13T00:57:28,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741989_1165 (size=126803) 2024-12-13T00:57:28,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741990_1166 (size=322274) 2024-12-13T00:57:28,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741990_1166 (size=322274) 2024-12-13T00:57:28,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741990_1166 (size=322274) 2024-12-13T00:57:28,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741991_1167 (size=1832290) 2024-12-13T00:57:28,287 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741991_1167 (size=1832290) 2024-12-13T00:57:28,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741991_1167 (size=1832290) 2024-12-13T00:57:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741992_1168 (size=30081) 2024-12-13T00:57:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741992_1168 (size=30081) 2024-12-13T00:57:28,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741992_1168 (size=30081) 2024-12-13T00:57:28,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741993_1169 (size=53616) 2024-12-13T00:57:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741993_1169 (size=53616) 2024-12-13T00:57:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741993_1169 (size=53616) 2024-12-13T00:57:28,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741994_1170 (size=29229) 2024-12-13T00:57:28,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741994_1170 (size=29229) 2024-12-13T00:57:28,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741994_1170 (size=29229) 2024-12-13T00:57:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741995_1171 (size=169089) 2024-12-13T00:57:28,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741995_1171 (size=169089) 2024-12-13T00:57:28,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741995_1171 (size=169089) 2024-12-13T00:57:28,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741996_1172 (size=451756) 2024-12-13T00:57:28,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741996_1172 (size=451756) 2024-12-13T00:57:28,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741996_1172 (size=451756) 2024-12-13T00:57:28,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741997_1173 (size=6350924) 2024-12-13T00:57:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741997_1173 (size=6350924) 2024-12-13T00:57:28,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741997_1173 (size=6350924) 2024-12-13T00:57:28,394 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T00:57:28,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T00:57:28,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741998_1174 (size=5175431) 2024-12-13T00:57:28,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741999_1175 (size=136454) 2024-12-13T00:57:28,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741999_1175 (size=136454) 2024-12-13T00:57:28,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741999_1175 (size=136454) 2024-12-13T00:57:28,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742000_1176 (size=907855) 2024-12-13T00:57:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742000_1176 (size=907855) 2024-12-13T00:57:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742000_1176 (size=907855) 2024-12-13T00:57:28,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742001_1177 (size=3317408) 2024-12-13T00:57:28,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742001_1177 (size=3317408) 2024-12-13T00:57:28,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742001_1177 (size=3317408) 2024-12-13T00:57:28,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742002_1178 (size=503880) 2024-12-13T00:57:28,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742002_1178 (size=503880) 2024-12-13T00:57:28,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742002_1178 (size=503880) 2024-12-13T00:57:28,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T00:57:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T00:57:28,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742003_1179 (size=4695811) 2024-12-13T00:57:28,522 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
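The long run of "For class X, using jar Y" entries, followed by the blocks stored for blk_1073741976 onward, is TableMapReduceUtil locating the jar that provides each class the export job needs and staging those jars for the MapReduce distributed cache; the "No job jar file set" warning is expected here because the test drives the tool from classes on the local classpath rather than a packaged jar. A minimal sketch of the kind of public call that produces these entries (job name invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");   // hypothetical job name
    // Resolves the containing jar for the HBase/Hadoop classes the job depends on
    // and adds each jar to the job's tmpjars so mappers can load them at runtime.
    TableMapReduceUtil.addDependencyJars(job);
  }
}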
2024-12-13T00:57:28,525 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-13T00:57:28,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:57:28,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742004_1180 (size=344) 2024-12-13T00:57:28,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742004_1180 (size=344) 2024-12-13T00:57:28,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742004_1180 (size=344) 2024-12-13T00:57:28,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742005_1181 (size=15) 2024-12-13T00:57:28,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742005_1181 (size=15) 2024-12-13T00:57:28,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742005_1181 (size=15) 2024-12-13T00:57:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742006_1182 (size=304893) 2024-12-13T00:57:28,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742006_1182 (size=304893) 2024-12-13T00:57:28,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742006_1182 (size=304893) 2024-12-13T00:57:28,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:57:28,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:57:28,722 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0003_000001 (auth:SIMPLE) from 127.0.0.1:47564 2024-12-13T00:57:29,201 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:57:29,403 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region f365b65dae95a0516608c8eeb7b680b7 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:57:29,404 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ec0b71eb23afa1c8964152a729ede4eb changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:57:29,404 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c77b7057609be1e5abde48e096a7d0b7 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:57:29,404 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region a63180885f4213bf0dc326227a9fd1b4 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:57:34,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-13T00:57:34,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-13T00:57:35,332 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0003_000001 (auth:SIMPLE) from 127.0.0.1:35966 2024-12-13T00:57:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742007_1183 (size=350567) 2024-12-13T00:57:35,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742007_1183 (size=350567) 2024-12-13T00:57:35,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742007_1183 (size=350567) 2024-12-13T00:57:37,626 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0003_000001 (auth:SIMPLE) from 127.0.0.1:33926 2024-12-13T00:57:42,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742008_1184 (size=8392) 2024-12-13T00:57:42,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742008_1184 (size=8392) 2024-12-13T00:57:42,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742008_1184 (size=8392) 2024-12-13T00:57:42,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742009_1185 (size=5216) 2024-12-13T00:57:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742009_1185 (size=5216) 2024-12-13T00:57:42,152 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742009_1185 (size=5216) 2024-12-13T00:57:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742010_1186 (size=17422) 2024-12-13T00:57:42,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742010_1186 (size=17422) 2024-12-13T00:57:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742010_1186 (size=17422) 2024-12-13T00:57:42,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742011_1187 (size=465) 2024-12-13T00:57:42,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742011_1187 (size=465) 2024-12-13T00:57:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742011_1187 (size=465) 2024-12-13T00:57:42,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742012_1188 (size=17422) 2024-12-13T00:57:42,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742012_1188 (size=17422) 2024-12-13T00:57:42,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742012_1188 (size=17422) 2024-12-13T00:57:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742013_1189 (size=350567) 2024-12-13T00:57:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742013_1189 (size=350567) 2024-12-13T00:57:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742013_1189 (size=350567) 2024-12-13T00:57:42,323 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0003_000001 (auth:SIMPLE) from 127.0.0.1:33934 2024-12-13T00:57:43,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:57:43,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
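
The entries above trace the snapshot export: ExportSnapshot loads the snapshot's hfile list, splits the copy work, runs it as a MapReduce job (the YARN app attempts and the block writes for the job output), and then finalizes and verifies the exported copy. A minimal client-side sketch of driving the same tool, assuming it can be run through Hadoop's ToolRunner with the options documented for the command line; the -copy-to target below is illustrative, not the path used in this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Command-line equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportFileSystemState -copy-to hdfs://backup:8020/hbase -mappers 1
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://backup:8020/hbase",  // illustrative target filesystem
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }

On success the tool logs "Export Completed: <snapshot name>", as seen in the next entry; a non-zero exit code indicates the copy or the final verification failed.
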
2024-12-13T00:57:43,817 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-13T00:57:43,817 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:57:43,818 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:57:43,818 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T00:57:43,819 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-13T00:57:43,819 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-13T00:57:43,819 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-13T00:57:43,821 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-13T00:57:43,821 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051446516/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-13T00:57:43,829 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-13T00:57:43,830 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-13T00:57:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T00:57:43,834 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051463833"}]},"ts":"1734051463833"} 2024-12-13T00:57:43,836 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-13T00:57:43,838 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-13T00:57:43,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-13T00:57:43,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, UNASSIGN}] 2024-12-13T00:57:43,841 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, UNASSIGN 2024-12-13T00:57:43,841 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, UNASSIGN 2024-12-13T00:57:43,842 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=ec0b71eb23afa1c8964152a729ede4eb, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:43,843 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=a63180885f4213bf0dc326227a9fd1b4, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:43,845 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:43,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure ec0b71eb23afa1c8964152a729ede4eb, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:57:43,845 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:57:43,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure a63180885f4213bf0dc326227a9fd1b4, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:57:43,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T00:57:43,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:43,997 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:43,997 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:43,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing ec0b71eb23afa1c8964152a729ede4eb, disabling compactions & flushes 2024-12-13T00:57:43,998 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. after waiting 0 ms 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:43,998 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:57:43,998 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing a63180885f4213bf0dc326227a9fd1b4, disabling compactions & flushes 2024-12-13T00:57:43,999 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:43,999 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 2024-12-13T00:57:43,999 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. after waiting 0 ms 2024-12-13T00:57:43,999 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
2024-12-13T00:57:44,010 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:57:44,011 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:44,011 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb. 2024-12-13T00:57:44,011 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for ec0b71eb23afa1c8964152a729ede4eb: 2024-12-13T00:57:44,012 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:57:44,013 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:44,013 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:57:44,014 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4. 
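
The region closes above are the regionserver side of the table disable started a few entries earlier (pid=74, DisableTableProcedure): each region stops compactions and flushes, takes the close lock, writes its recovered.edits seqid file, and reports CLOSED back to the master. On the client this whole sequence is a single Admin call; a minimal sketch, assuming the standard HBase 2.x client API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        // disableTable blocks until the master's DisableTableProcedure (and its
        // CloseRegionProcedure children, as logged above) has finished.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          if (!admin.isTableDisabled(tn)) {
            admin.disableTable(tn);
          }
        }
      }
    }

The repeated "Checking to see if procedure is done pid=74" entries are the client polling for exactly this completion.
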
2024-12-13T00:57:44,014 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for a63180885f4213bf0dc326227a9fd1b4: 2024-12-13T00:57:44,014 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=ec0b71eb23afa1c8964152a729ede4eb, regionState=CLOSED 2024-12-13T00:57:44,016 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:44,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=a63180885f4213bf0dc326227a9fd1b4, regionState=CLOSED 2024-12-13T00:57:44,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-13T00:57:44,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure ec0b71eb23afa1c8964152a729ede4eb, server=9812beb1e4cd,40687,1734051385054 in 171 msec 2024-12-13T00:57:44,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ec0b71eb23afa1c8964152a729ede4eb, UNASSIGN in 178 msec 2024-12-13T00:57:44,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-13T00:57:44,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure a63180885f4213bf0dc326227a9fd1b4, server=9812beb1e4cd,46075,1734051384875 in 175 msec 2024-12-13T00:57:44,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-13T00:57:44,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=a63180885f4213bf0dc326227a9fd1b4, UNASSIGN in 182 msec 2024-12-13T00:57:44,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-13T00:57:44,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 185 msec 2024-12-13T00:57:44,026 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051464026"}]},"ts":"1734051464026"} 2024-12-13T00:57:44,028 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-13T00:57:44,030 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-13T00:57:44,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 201 msec 2024-12-13T00:57:44,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-13T00:57:44,139 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-13T00:57:44,140 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-13T00:57:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,142 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-13T00:57:44,146 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-13T00:57:44,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,150 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T00:57:44,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T00:57:44,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T00:57:44,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-13T00:57:44,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-13T00:57:44,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-13T00:57:44,157 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:44,157 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:44,160 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/recovered.edits] 2024-12-13T00:57:44,160 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/recovered.edits] 2024-12-13T00:57:44,166 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/cf/1a9da160f3e8431d9edf07909e6b9c27 2024-12-13T00:57:44,166 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/cf/26d71a1656604b5b875ecf147a09f11e 2024-12-13T00:57:44,171 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4/recovered.edits/9.seqid 2024-12-13T00:57:44,171 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb/recovered.edits/9.seqid 2024-12-13T00:57:44,172 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/a63180885f4213bf0dc326227a9fd1b4 2024-12-13T00:57:44,172 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemState/ec0b71eb23afa1c8964152a729ede4eb 2024-12-13T00:57:44,172 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-13T00:57:44,175 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,179 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-13T00:57:44,182 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-13T00:57:44,184 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,184 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
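
Above, DeleteTableProcedure (pid=80) archives the two region directories under archive/data/default/, removes the region rows and the table state from hbase:meta, and drops the descriptor; the entries that follow then delete the two snapshots taken during the test. Client-side this is one deleteTable call plus two deleteSnapshot calls; a minimal sketch, assuming an Admin handle obtained as in the earlier example (the class and method names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class ExportTestCleanup {
      // Drop the (already disabled) table and remove the snapshots created for the test.
      static void cleanup(Admin admin) throws IOException {
        admin.deleteTable(TableName.valueOf("testtb-testExportFileSystemState"));
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
        admin.deleteSnapshot("snaptb0-testExportFileSystemState");
      }
    }
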
2024-12-13T00:57:44,184 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051464184"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:44,184 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051464184"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:44,187 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:57:44,187 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a63180885f4213bf0dc326227a9fd1b4, NAME => 'testtb-testExportFileSystemState,,1734051444411.a63180885f4213bf0dc326227a9fd1b4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ec0b71eb23afa1c8964152a729ede4eb, NAME => 'testtb-testExportFileSystemState,1,1734051444411.ec0b71eb23afa1c8964152a729ede4eb.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:57:44,187 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-13T00:57:44,187 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051464187"}]},"ts":"9223372036854775807"} 2024-12-13T00:57:44,189 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-13T00:57:44,191 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-13T00:57:44,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 51 msec 2024-12-13T00:57:44,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-13T00:57:44,256 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-13T00:57:44,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-13T00:57:44,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-13T00:57:44,277 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-13T00:57:44,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-13T00:57:44,302 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=784 (was 783) Potentially hanging thread: Thread-2766 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 28252) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native 
Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1822079748_1 at /127.0.0.1:43420 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:59288 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:34282 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:32930 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:35805 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=772 (was 780), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=571 (was 592), ProcessCount=18 (was 20), AvailableMemoryMB=4316 (was 4234) - AvailableMemoryMB LEAK? - 2024-12-13T00:57:44,303 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-13T00:57:44,322 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=784, OpenFileDescriptor=772, MaxFileDescriptor=1048576, SystemLoadAverage=571, ProcessCount=17, AvailableMemoryMB=4315 2024-12-13T00:57:44,322 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-13T00:57:44,325 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:57:44,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:57:44,327 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:57:44,327 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:44,328 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-13T00:57:44,329 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:57:44,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T00:57:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742014_1190 (size=404) 2024-12-13T00:57:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742014_1190 (size=404) 2024-12-13T00:57:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742014_1190 (size=404) 2024-12-13T00:57:44,349 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9fbedc17e1e6db0c89da7568df206c9d, NAME => 'testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:44,349 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 58b820ea3d545af749e330a65348a6c9, NAME => 'testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:44,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742015_1191 (size=65) 2024-12-13T00:57:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742015_1191 (size=65) 2024-12-13T00:57:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742015_1191 (size=65) 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 9fbedc17e1e6db0c89da7568df206c9d, disabling compactions & flushes 2024-12-13T00:57:44,367 INFO 
[RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. after waiting 0 ms 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:44,367 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:44,367 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9fbedc17e1e6db0c89da7568df206c9d: 2024-12-13T00:57:44,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742016_1192 (size=65) 2024-12-13T00:57:44,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742016_1192 (size=65) 2024-12-13T00:57:44,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742016_1192 (size=65) 2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing 58b820ea3d545af749e330a65348a6c9, disabling compactions & flushes 2024-12-13T00:57:44,380 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. after waiting 0 ms 2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:44,380 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 
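
The create request a few entries back spells out the table being built here: REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1', BLOOMFILTER => 'ROW', and a 64 KB block size, plus two initial regions split at '1' (the RegionOpenAndInit workers above initialize '' -> '1' and '1' -> ''). A minimal sketch of issuing the same create through the 2.x descriptor builders; the class and method names are illustrative, and attributes not set explicitly are left at their defaults:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateConsecutiveExportsTable {
      static void create(Admin admin) throws IOException {
        // One 'cf' family: single version, ROW bloom filter, 64 KB blocks.
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build());
        // Split at '1' so the table starts with the two regions seen in the log.
        admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
      }
    }
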
2024-12-13T00:57:44,380 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for 58b820ea3d545af749e330a65348a6c9: 2024-12-13T00:57:44,382 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:57:44,382 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051464382"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051464382"}]},"ts":"1734051464382"} 2024-12-13T00:57:44,382 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051464382"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051464382"}]},"ts":"1734051464382"} 2024-12-13T00:57:44,386 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:57:44,387 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:57:44,388 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051464387"}]},"ts":"1734051464387"} 2024-12-13T00:57:44,390 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-13T00:57:44,395 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:57:44,398 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:57:44,398 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:57:44,398 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:57:44,398 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:57:44,398 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:57:44,398 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:57:44,398 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:57:44,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, ASSIGN}] 2024-12-13T00:57:44,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, 
region=9fbedc17e1e6db0c89da7568df206c9d, ASSIGN 2024-12-13T00:57:44,400 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, ASSIGN 2024-12-13T00:57:44,400 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:57:44,401 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:57:44,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T00:57:44,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-13T00:57:44,551 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:57:44,551 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=58b820ea3d545af749e330a65348a6c9, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:44,551 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=9fbedc17e1e6db0c89da7568df206c9d, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:44,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure 58b820ea3d545af749e330a65348a6c9, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:57:44,560 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:57:44,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T00:57:44,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:44,711 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 
2024-12-13T00:57:44,711 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 58b820ea3d545af749e330a65348a6c9, NAME => 'testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:57:44,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:44,712 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. service=AccessControlService 2024-12-13T00:57:44,712 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:57:44,713 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,713 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:44,713 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,713 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,714 INFO [StoreOpener-58b820ea3d545af749e330a65348a6c9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,716 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 
2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 9fbedc17e1e6db0c89da7568df206c9d, NAME => 'testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:57:44,717 INFO [StoreOpener-58b820ea3d545af749e330a65348a6c9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58b820ea3d545af749e330a65348a6c9 columnFamilyName cf 2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. service=AccessControlService 2024-12-13T00:57:44,717 DEBUG [StoreOpener-58b820ea3d545af749e330a65348a6c9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:44,717 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,717 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,717 INFO [StoreOpener-58b820ea3d545af749e330a65348a6c9-1 {}] regionserver.HStore(327): Store=58b820ea3d545af749e330a65348a6c9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:44,718 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,720 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,721 INFO [StoreOpener-9fbedc17e1e6db0c89da7568df206c9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,723 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:44,732 INFO [StoreOpener-9fbedc17e1e6db0c89da7568df206c9d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9fbedc17e1e6db0c89da7568df206c9d columnFamilyName cf 2024-12-13T00:57:44,732 DEBUG [StoreOpener-9fbedc17e1e6db0c89da7568df206c9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:57:44,733 INFO [StoreOpener-9fbedc17e1e6db0c89da7568df206c9d-1 {}] regionserver.HStore(327): Store=9fbedc17e1e6db0c89da7568df206c9d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:57:44,734 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,734 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,736 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:44,737 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:44,737 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 58b820ea3d545af749e330a65348a6c9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73218020, jitterRate=0.09103351831436157}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:44,738 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for 58b820ea3d545af749e330a65348a6c9: 2024-12-13T00:57:44,739 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9., pid=84, masterSystemTime=1734051464707 2024-12-13T00:57:44,740 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:44,740 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 
2024-12-13T00:57:44,741 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=58b820ea3d545af749e330a65348a6c9, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:44,744 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:57:44,745 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 9fbedc17e1e6db0c89da7568df206c9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64000665, jitterRate=-0.04631577432155609}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:57:44,745 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 9fbedc17e1e6db0c89da7568df206c9d: 2024-12-13T00:57:44,746 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d., pid=85, masterSystemTime=1734051464712 2024-12-13T00:57:44,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-13T00:57:44,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure 58b820ea3d545af749e330a65348a6c9, server=9812beb1e4cd,40687,1734051385054 in 191 msec 2024-12-13T00:57:44,748 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:44,748 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 
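At this point the assignment subprocedures (pids 82-85) have opened both regions: 9fbedc17e1e6db0c89da7568df206c9d on 9812beb1e4cd,45231 and 58b820ea3d545af749e330a65348a6c9 on 9812beb1e4cd,40687. Purely as an illustration (not necessarily something this test does), the resulting placement can be read back from the client with a RegionLocator:

    // Illustrative sketch, reusing conn from the earlier snippet (needs org.apache.hadoop.hbase.HRegionLocation).
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // prints encoded region name -> hosting server, matching the regionLocation= entries above
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }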
2024-12-13T00:57:44,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, ASSIGN in 349 msec 2024-12-13T00:57:44,748 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=9fbedc17e1e6db0c89da7568df206c9d, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:44,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82 2024-12-13T00:57:44,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d, server=9812beb1e4cd,45231,1734051384968 in 190 msec 2024-12-13T00:57:44,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-13T00:57:44,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, ASSIGN in 353 msec 2024-12-13T00:57:44,753 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:57:44,753 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051464753"}]},"ts":"1734051464753"} 2024-12-13T00:57:44,754 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-13T00:57:44,757 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:57:44,757 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-13T00:57:44,760 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-13T00:57:44,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:57:44,764 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:44,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:44,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:44,767 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-13T00:57:44,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 440 msec 2024-12-13T00:57:44,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-13T00:57:44,934 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-13T00:57:44,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-13T00:57:44,935 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:44,939 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-13T00:57:44,939 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:44,939 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-13T00:57:44,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T00:57:44,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051464948 (current time:1734051464948). 
2024-12-13T00:57:44,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:44,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-13T00:57:44,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f5ef52b to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a431662 2024-12-13T00:57:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c1d7c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:44,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:44,967 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:44,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f5ef52b to 127.0.0.1:49424 2024-12-13T00:57:44,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:44,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a2953c1 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17491fe2 2024-12-13T00:57:44,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@829ed5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:44,988 DEBUG [hconnection-0x228c7846-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:44,989 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a2953c1 to 127.0.0.1:49424 2024-12-13T00:57:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 
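The master has now received the snapshot RPC for emptySnaptb0-testConsecutiveExports, defaulted its creation time, TTL and version, recorded jenkins as the owner, and re-read the table ACL. From the client side this whole exchange is a single blocking call; a minimal sketch, again reusing admin from the first snippet (the snapshot defaults to type FLUSH, matching the 'type=FLUSH' shown in these records):

    // Illustrative sketch: the FLUSH snapshot request handled in the records above and below.
    admin.snapshot("emptySnaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"));
    // Returns once MasterRpcServices reports the snapshot procedure (pid=86 here) as done.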
2024-12-13T00:57:44,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:44,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T00:57:44,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-13T00:57:44,997 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:44,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T00:57:44,998 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:45,001 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742017_1193 (size=161) 2024-12-13T00:57:45,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742017_1193 (size=161) 2024-12-13T00:57:45,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742017_1193 (size=161) 2024-12-13T00:57:45,062 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:45,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9}] 2024-12-13T00:57:45,065 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:45,065 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 
9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:45,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T00:57:45,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:45,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-13T00:57:45,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:45,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 9fbedc17e1e6db0c89da7568df206c9d: 2024-12-13T00:57:45,220 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:45,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. for emptySnaptb0-testConsecutiveExports completed. 2024-12-13T00:57:45,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-13T00:57:45,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:45,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:45,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 58b820ea3d545af749e330a65348a6c9: 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. for emptySnaptb0-testConsecutiveExports completed. 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:45,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:57:45,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742018_1194 (size=68) 2024-12-13T00:57:45,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742018_1194 (size=68) 2024-12-13T00:57:45,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742018_1194 (size=68) 2024-12-13T00:57:45,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:45,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-13T00:57:45,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-13T00:57:45,240 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:45,240 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:45,242 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 in 178 msec 2024-12-13T00:57:45,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742019_1195 (size=68) 2024-12-13T00:57:45,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742019_1195 (size=68) 2024-12-13T00:57:45,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742019_1195 (size=68) 2024-12-13T00:57:45,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 
2024-12-13T00:57:45,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-13T00:57:45,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-13T00:57:45,249 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:45,249 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:45,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-13T00:57:45,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d in 188 msec 2024-12-13T00:57:45,253 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:45,254 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:45,254 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:45,254 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-13T00:57:45,255 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-13T00:57:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742020_1196 (size=543) 2024-12-13T00:57:45,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742020_1196 (size=543) 2024-12-13T00:57:45,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742020_1196 (size=543) 2024-12-13T00:57:45,291 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:45,302 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T00:57:45,319 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:45,320 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-13T00:57:45,322 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:45,323 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-13T00:57:45,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 328 msec 2024-12-13T00:57:45,477 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-13T00:57:45,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-13T00:57:45,605 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-13T00:57:45,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:45,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:57:45,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-13T00:57:45,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 
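With the empty snapshot finished, the test writes rows into both regions, and each regionserver warns that the writes skip the WAL ("Data may be lost in the event of a crash"). The actual row and value generation is not shown in this log; the following is only a sketch of a single WAL-skipping put of the same shape (family 'cf', qualifier 'q', with a row key borrowed from the flush records further down):

    // Illustrative sketch of a put with the WAL disabled, reusing conn from the first snippet.
    try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put p = new Put(Bytes.toBytes("081a6456859d17306f648d2b4e6c167c"));
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("some-value"));  // value is made up
      p.setDurability(Durability.SKIP_WAL);   // produces the "WAL disabled" warning seen above
      table.put(p);
    }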
2024-12-13T00:57:45,636 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:57:45,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T00:57:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051465667 (current time:1734051465667). 2024-12-13T00:57:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:57:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-13T00:57:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:57:45,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a2674d5 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b296b9f 2024-12-13T00:57:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e8cd8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:45,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:45,699 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a2674d5 to 127.0.0.1:49424 2024-12-13T00:57:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60b652c0 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2003a8da 2024-12-13T00:57:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63c49de0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:57:45,737 DEBUG [hconnection-0x205bce5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:57:45,739 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44332, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:57:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60b652c0 to 127.0.0.1:49424 2024-12-13T00:57:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:57:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-13T00:57:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:57:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-13T00:57:45,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-13T00:57:45,752 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:57:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T00:57:45,754 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:57:45,758 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:57:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T00:57:45,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742021_1197 (size=156) 2024-12-13T00:57:45,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742021_1197 (size=156) 2024-12-13T00:57:45,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742021_1197 (size=156) 2024-12-13T00:57:45,870 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:57:45,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9}] 2024-12-13T00:57:45,872 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:45,872 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:46,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:57:46,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:57:46,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-13T00:57:46,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-13T00:57:46,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:57:46,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 9fbedc17e1e6db0c89da7568df206c9d 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T00:57:46,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 
2024-12-13T00:57:46,026 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 58b820ea3d545af749e330a65348a6c9 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T00:57:46,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T00:57:46,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/.tmp/cf/4f19942efac348ebb09cd531da5847df is 71, key is 081a6456859d17306f648d2b4e6c167c/cf:q/1734051465625/Put/seqid=0 2024-12-13T00:57:46,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/.tmp/cf/ce3b104a4e4844d6a287844d00e6d347 is 71, key is 12e71e93120103ff8a0153e86781d667/cf:q/1734051465627/Put/seqid=0 2024-12-13T00:57:46,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742022_1198 (size=5356) 2024-12-13T00:57:46,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742022_1198 (size=5356) 2024-12-13T00:57:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742022_1198 (size=5356) 2024-12-13T00:57:46,203 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/.tmp/cf/4f19942efac348ebb09cd531da5847df 2024-12-13T00:57:46,214 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/.tmp/cf/4f19942efac348ebb09cd531da5847df as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df 2024-12-13T00:57:46,228 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df, entries=4, sequenceid=6, filesize=5.2 K 2024-12-13T00:57:46,233 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 9fbedc17e1e6db0c89da7568df206c9d in 
208ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 9fbedc17e1e6db0c89da7568df206c9d: 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. for snaptb0-testConsecutiveExports completed. 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df] hfiles 2024-12-13T00:57:46,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df for snapshot=snaptb0-testConsecutiveExports 2024-12-13T00:57:46,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742023_1199 (size=8256) 2024-12-13T00:57:46,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742023_1199 (size=8256) 2024-12-13T00:57:46,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742023_1199 (size=8256) 2024-12-13T00:57:46,262 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/.tmp/cf/ce3b104a4e4844d6a287844d00e6d347 2024-12-13T00:57:46,271 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/.tmp/cf/ce3b104a4e4844d6a287844d00e6d347 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347 2024-12-13T00:57:46,280 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347, entries=46, sequenceid=6, filesize=8.1 K 2024-12-13T00:57:46,281 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 58b820ea3d545af749e330a65348a6c9 in 255ms, sequenceid=6, compaction requested=false 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 58b820ea3d545af749e330a65348a6c9: 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. for snaptb0-testConsecutiveExports completed. 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347] hfiles 2024-12-13T00:57:46,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347 for snapshot=snaptb0-testConsecutiveExports 2024-12-13T00:57:46,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742024_1200 (size=107) 2024-12-13T00:57:46,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742024_1200 (size=107) 2024-12-13T00:57:46,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742024_1200 (size=107) 2024-12-13T00:57:46,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 
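The entries above walk through the region-level work of the FLUSH snapshot snaptb0-testConsecutiveExports: each region of testtb-testConsecutiveExports is flushed, the resulting hfile is committed, and a reference to it is recorded in the snapshot manifest. A minimal sketch of how such a snapshot is typically requested through the standard HBase Admin client API follows; the class name and connection setup are illustrative, and only the snapshot and table names are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush-type snapshot; the master then runs the SnapshotProcedure
      // and per-region SnapshotRegionProcedure steps recorded in this log.
      admin.snapshot("snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}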
2024-12-13T00:57:46,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-13T00:57:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-13T00:57:46,300 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:46,301 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:57:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742025_1201 (size=107) 2024-12-13T00:57:46,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d in 436 msec 2024-12-13T00:57:46,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742025_1201 (size=107) 2024-12-13T00:57:46,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742025_1201 (size=107) 2024-12-13T00:57:46,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:57:46,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-13T00:57:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-13T00:57:46,312 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:46,312 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:57:46,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=89 2024-12-13T00:57:46,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure 58b820ea3d545af749e330a65348a6c9 in 443 msec 2024-12-13T00:57:46,315 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:57:46,316 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:57:46,316 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:57:46,316 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-13T00:57:46,317 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T00:57:46,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T00:57:46,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742026_1202 (size=621) 2024-12-13T00:57:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742026_1202 (size=621) 2024-12-13T00:57:46,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742026_1202 (size=621) 2024-12-13T00:57:46,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:57:46,398 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:57:46,399 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T00:57:46,401 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:57:46,401 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-13T00:57:46,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 655 msec 2024-12-13T00:57:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-13T00:57:46,862 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-13T00:57:46,863 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862 2024-12-13T00:57:46,863 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:46,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:57:46,911 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d668592, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T00:57:46,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
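Here the test hands off to the ExportSnapshot tool: the source is the HDFS root logged as inputRoot, and the target is a local file:// directory (outputRoot, skipTmp=false). A minimal sketch of driving the same tool programmatically, assuming the documented -snapshot / -copy-to / -overwrite options, is shown below; the destination path is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced hfiles to the target filesystem;
    // -overwrite is intended for re-exporting to a destination that already holds the snapshot.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",
        "-overwrite"
    });
    System.exit(rc);
  }
}

The equivalent command-line form is: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot snaptb0-testConsecutiveExports -copy-to <target>, which is what this test effectively exercises twice in a row.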
2024-12-13T00:57:46,927 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T00:57:47,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:47,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:47,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:47,010 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:47,396 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000002/launch_container.sh] 2024-12-13T00:57:47,397 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000002/container_tokens] 2024-12-13T00:57:47,397 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000002/sysfs] 2024-12-13T00:57:48,295 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-10576673253547292802.jar 2024-12-13T00:57:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-13745913334056431357.jar 2024-12-13T00:57:48,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,370 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:57:48,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:57:48,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:57:48,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:57:48,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:57:48,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:57:48,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:57:48,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:57:48,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:57:48,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:57:48,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:57:48,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:57:48,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:57:48,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:48,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:48,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:48,375 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:48,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:57:48,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:48,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:57:48,444 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0003_000001 (auth:SIMPLE) from 127.0.0.1:51240 2024-12-13T00:57:48,466 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000001/launch_container.sh] 2024-12-13T00:57:48,466 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000001/container_tokens] 2024-12-13T00:57:48,466 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0003/container_1734051393214_0003_01_000001/sysfs] 2024-12-13T00:57:48,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742027_1203 (size=127628) 2024-12-13T00:57:48,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742027_1203 (size=127628) 2024-12-13T00:57:48,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742027_1203 (size=127628) 2024-12-13T00:57:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40551 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T00:57:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T00:57:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742028_1204 (size=2172137) 2024-12-13T00:57:48,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742029_1205 (size=213228) 2024-12-13T00:57:48,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742029_1205 (size=213228) 2024-12-13T00:57:48,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742029_1205 (size=213228) 2024-12-13T00:57:48,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742030_1206 (size=1877034) 2024-12-13T00:57:48,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742030_1206 (size=1877034) 2024-12-13T00:57:48,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742030_1206 (size=1877034) 2024-12-13T00:57:48,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742031_1207 (size=533455) 2024-12-13T00:57:48,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742031_1207 (size=533455) 2024-12-13T00:57:48,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742031_1207 (size=533455) 2024-12-13T00:57:48,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742032_1208 (size=7280644) 2024-12-13T00:57:48,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742032_1208 (size=7280644) 2024-12-13T00:57:48,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742032_1208 (size=7280644) 2024-12-13T00:57:48,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742033_1209 (size=4188619) 2024-12-13T00:57:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742033_1209 (size=4188619) 2024-12-13T00:57:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742033_1209 (size=4188619) 2024-12-13T00:57:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742034_1210 (size=20406) 2024-12-13T00:57:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742034_1210 (size=20406) 2024-12-13T00:57:48,601 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742034_1210 (size=20406) 2024-12-13T00:57:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742035_1211 (size=75495) 2024-12-13T00:57:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742035_1211 (size=75495) 2024-12-13T00:57:48,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742035_1211 (size=75495) 2024-12-13T00:57:48,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742036_1212 (size=45609) 2024-12-13T00:57:48,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742036_1212 (size=45609) 2024-12-13T00:57:48,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742036_1212 (size=45609) 2024-12-13T00:57:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742037_1213 (size=110084) 2024-12-13T00:57:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742037_1213 (size=110084) 2024-12-13T00:57:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742037_1213 (size=110084) 2024-12-13T00:57:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742038_1214 (size=451756) 2024-12-13T00:57:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742038_1214 (size=451756) 2024-12-13T00:57:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742038_1214 (size=451756) 2024-12-13T00:57:48,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742039_1215 (size=1323991) 2024-12-13T00:57:48,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742039_1215 (size=1323991) 2024-12-13T00:57:48,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742039_1215 (size=1323991) 2024-12-13T00:57:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742040_1216 (size=23076) 2024-12-13T00:57:48,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742040_1216 (size=23076) 2024-12-13T00:57:48,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742040_1216 (size=23076) 2024-12-13T00:57:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742041_1217 (size=126803) 2024-12-13T00:57:48,673 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742041_1217 (size=126803) 2024-12-13T00:57:48,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742041_1217 (size=126803) 2024-12-13T00:57:48,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742042_1218 (size=322274) 2024-12-13T00:57:48,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742042_1218 (size=322274) 2024-12-13T00:57:48,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742042_1218 (size=322274) 2024-12-13T00:57:48,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742043_1219 (size=1832290) 2024-12-13T00:57:48,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742043_1219 (size=1832290) 2024-12-13T00:57:48,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742043_1219 (size=1832290) 2024-12-13T00:57:48,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742044_1220 (size=30081) 2024-12-13T00:57:48,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742044_1220 (size=30081) 2024-12-13T00:57:48,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742044_1220 (size=30081) 2024-12-13T00:57:48,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742045_1221 (size=53616) 2024-12-13T00:57:48,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742045_1221 (size=53616) 2024-12-13T00:57:48,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742045_1221 (size=53616) 2024-12-13T00:57:48,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742046_1222 (size=29229) 2024-12-13T00:57:48,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742046_1222 (size=29229) 2024-12-13T00:57:48,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742046_1222 (size=29229) 2024-12-13T00:57:48,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742047_1223 (size=169089) 2024-12-13T00:57:48,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742047_1223 (size=169089) 2024-12-13T00:57:48,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742047_1223 (size=169089) 2024-12-13T00:57:48,851 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742048_1224 (size=5175431) 2024-12-13T00:57:48,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742048_1224 (size=5175431) 2024-12-13T00:57:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742048_1224 (size=5175431) 2024-12-13T00:57:48,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742049_1225 (size=136454) 2024-12-13T00:57:48,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742049_1225 (size=136454) 2024-12-13T00:57:48,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742049_1225 (size=136454) 2024-12-13T00:57:48,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742050_1226 (size=907855) 2024-12-13T00:57:48,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742050_1226 (size=907855) 2024-12-13T00:57:48,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742050_1226 (size=907855) 2024-12-13T00:57:48,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742051_1227 (size=3317408) 2024-12-13T00:57:48,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742051_1227 (size=3317408) 2024-12-13T00:57:48,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742051_1227 (size=3317408) 2024-12-13T00:57:48,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742052_1228 (size=6350924) 2024-12-13T00:57:48,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742052_1228 (size=6350924) 2024-12-13T00:57:48,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742052_1228 (size=6350924) 2024-12-13T00:57:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742053_1229 (size=503880) 2024-12-13T00:57:48,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742053_1229 (size=503880) 2024-12-13T00:57:48,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742053_1229 (size=503880) 2024-12-13T00:57:48,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T00:57:48,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T00:57:48,974 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742054_1230 (size=4695811) 2024-12-13T00:57:48,975 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T00:57:48,978 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-13T00:57:48,981 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:57:48,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742055_1231 (size=338) 2024-12-13T00:57:48,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742055_1231 (size=338) 2024-12-13T00:57:48,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742055_1231 (size=338) 2024-12-13T00:57:49,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742056_1232 (size=15) 2024-12-13T00:57:49,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742056_1232 (size=15) 2024-12-13T00:57:49,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742056_1232 (size=15) 2024-12-13T00:57:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742057_1233 (size=304934) 2024-12-13T00:57:49,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742057_1233 (size=304934) 2024-12-13T00:57:49,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742057_1233 (size=304934) 2024-12-13T00:57:49,075 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:57:49,075 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:57:49,445 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0004_000001 (auth:SIMPLE) from 127.0.0.1:51254 2024-12-13T00:57:49,556 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:57:52,764 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
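The long runs of "For class <name>, using jar <path>" entries above (and again for the second export further down) are emitted while the export MapReduce job is being configured: each class the job depends on is resolved to a jar so it can be shipped with the job. A minimal sketch of the call that typically triggers this resolution, assuming the public TableMapReduceUtil helper, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");
    // Resolves HBase, ZooKeeper, protobuf, metrics, etc. classes to their jars and adds
    // them to the job's classpath; each resolution is logged at DEBUG level as seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}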
2024-12-13T00:57:54,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-13T00:57:54,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-13T00:57:55,605 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0004_000001 (auth:SIMPLE) from 127.0.0.1:53600 2024-12-13T00:57:55,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742058_1234 (size=350608) 2024-12-13T00:57:55,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742058_1234 (size=350608) 2024-12-13T00:57:55,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742058_1234 (size=350608) 2024-12-13T00:57:57,960 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0004_000001 (auth:SIMPLE) from 127.0.0.1:37982 2024-12-13T00:58:03,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742059_1235 (size=17447) 2024-12-13T00:58:03,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742059_1235 (size=17447) 2024-12-13T00:58:03,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742059_1235 (size=17447) 2024-12-13T00:58:03,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742060_1236 (size=462) 2024-12-13T00:58:03,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742060_1236 (size=462) 2024-12-13T00:58:03,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742060_1236 (size=462) 2024-12-13T00:58:03,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742061_1237 (size=17447) 2024-12-13T00:58:03,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742061_1237 (size=17447) 2024-12-13T00:58:03,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742061_1237 (size=17447) 2024-12-13T00:58:04,012 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000002/launch_container.sh] 2024-12-13T00:58:04,012 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000002/container_tokens] 2024-12-13T00:58:04,012 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000002/sysfs] 2024-12-13T00:58:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742062_1238 (size=350608) 2024-12-13T00:58:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742062_1238 (size=350608) 2024-12-13T00:58:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742062_1238 (size=350608) 2024-12-13T00:58:04,036 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0004_000001 (auth:SIMPLE) from 127.0.0.1:41080 2024-12-13T00:58:05,242 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:58:05,242 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
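Once the export job finishes, the test verifies the exported snapshot and then lists both the source and target snapshot directories; the listing in the entries that follow shows the expected .snapshotinfo and data.manifest files under the exported snapshot. A minimal sketch of such a listing with the Hadoop FileSystem API is given below; the target path is illustrative, mirroring the local-export-<timestamp> layout used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical destination; a completed export keeps the snapshot under
    // <target>/.hbase-snapshot/<snapshot-name>.
    Path dir = new Path("file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = dir.getFileSystem(new Configuration());
    for (FileStatus st : fs.listStatus(dir)) {
      System.out.println(st.getPath()); // expect .snapshotinfo and data.manifest
    }
  }
}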
2024-12-13T00:58:05,245 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-13T00:58:05,245 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:58:05,245 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:58:05,245 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T00:58:05,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T00:58:05,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T00:58:05,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@6d668592 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T00:58:05,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T00:58:05,247 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T00:58:05,250 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:05,290 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:05,290 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d668592, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T00:58:05,293 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:58:05,299 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-13T00:58:05,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:05,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:05,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:05,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-10750837473280111804.jar 2024-12-13T00:58:06,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-18363602874230949118.jar 2024-12-13T00:58:06,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,536 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,536 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,536 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,537 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,537 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:06,537 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:58:06,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:58:06,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:58:06,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:58:06,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:58:06,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:58:06,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:58:06,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:58:06,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:58:06,540 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:58:06,540 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:58:06,540 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:58:06,540 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:06,541 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:06,541 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:06,541 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:06,541 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:06,542 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:06,542 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:06,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742063_1239 (size=127628) 2024-12-13T00:58:06,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742063_1239 (size=127628) 2024-12-13T00:58:06,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742063_1239 (size=127628) 2024-12-13T00:58:06,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T00:58:06,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T00:58:06,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742064_1240 (size=2172137) 2024-12-13T00:58:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742065_1241 (size=213228) 2024-12-13T00:58:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742065_1241 (size=213228) 2024-12-13T00:58:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742065_1241 (size=213228) 2024-12-13T00:58:06,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742066_1242 (size=6350924) 2024-12-13T00:58:06,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742066_1242 (size=6350924) 2024-12-13T00:58:06,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742066_1242 (size=6350924) 2024-12-13T00:58:06,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742067_1243 (size=1877034) 2024-12-13T00:58:06,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742067_1243 (size=1877034) 2024-12-13T00:58:06,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742067_1243 (size=1877034) 2024-12-13T00:58:06,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742068_1244 (size=533455) 2024-12-13T00:58:06,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742068_1244 (size=533455) 2024-12-13T00:58:06,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742068_1244 (size=533455) 2024-12-13T00:58:06,890 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742069_1245 (size=7280644) 2024-12-13T00:58:06,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742069_1245 (size=7280644) 2024-12-13T00:58:06,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742069_1245 (size=7280644) 2024-12-13T00:58:06,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T00:58:06,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T00:58:06,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742070_1246 (size=4188619) 2024-12-13T00:58:06,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742071_1247 (size=20406) 2024-12-13T00:58:06,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742071_1247 (size=20406) 2024-12-13T00:58:06,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742071_1247 (size=20406) 2024-12-13T00:58:06,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742072_1248 (size=75495) 2024-12-13T00:58:06,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742072_1248 (size=75495) 2024-12-13T00:58:06,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742072_1248 (size=75495) 2024-12-13T00:58:06,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742073_1249 (size=45609) 2024-12-13T00:58:06,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742073_1249 (size=45609) 2024-12-13T00:58:06,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742073_1249 (size=45609) 2024-12-13T00:58:07,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742074_1250 (size=110084) 2024-12-13T00:58:07,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742074_1250 (size=110084) 2024-12-13T00:58:07,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742074_1250 (size=110084) 2024-12-13T00:58:07,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T00:58:07,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T00:58:07,462 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742075_1251 (size=1323991) 2024-12-13T00:58:07,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742076_1252 (size=23076) 2024-12-13T00:58:07,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742076_1252 (size=23076) 2024-12-13T00:58:07,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742076_1252 (size=23076) 2024-12-13T00:58:07,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742077_1253 (size=126803) 2024-12-13T00:58:07,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742077_1253 (size=126803) 2024-12-13T00:58:07,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742077_1253 (size=126803) 2024-12-13T00:58:07,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742078_1254 (size=322274) 2024-12-13T00:58:07,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742078_1254 (size=322274) 2024-12-13T00:58:07,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742078_1254 (size=322274) 2024-12-13T00:58:07,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T00:58:07,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T00:58:07,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742079_1255 (size=1832290) 2024-12-13T00:58:07,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742080_1256 (size=30081) 2024-12-13T00:58:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742080_1256 (size=30081) 2024-12-13T00:58:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742080_1256 (size=30081) 2024-12-13T00:58:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742081_1257 (size=53616) 2024-12-13T00:58:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742081_1257 (size=53616) 2024-12-13T00:58:07,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742081_1257 (size=53616) 2024-12-13T00:58:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742082_1258 (size=29229) 2024-12-13T00:58:07,590 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742082_1258 (size=29229) 2024-12-13T00:58:07,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742082_1258 (size=29229) 2024-12-13T00:58:07,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742083_1259 (size=169089) 2024-12-13T00:58:07,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742083_1259 (size=169089) 2024-12-13T00:58:07,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742083_1259 (size=169089) 2024-12-13T00:58:07,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T00:58:07,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T00:58:07,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742084_1260 (size=5175431) 2024-12-13T00:58:07,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742085_1261 (size=136454) 2024-12-13T00:58:07,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742085_1261 (size=136454) 2024-12-13T00:58:07,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742085_1261 (size=136454) 2024-12-13T00:58:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742086_1262 (size=907855) 2024-12-13T00:58:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742086_1262 (size=907855) 2024-12-13T00:58:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742086_1262 (size=907855) 2024-12-13T00:58:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T00:58:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T00:58:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742087_1263 (size=3317408) 2024-12-13T00:58:07,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742088_1264 (size=451756) 2024-12-13T00:58:07,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742088_1264 (size=451756) 2024-12-13T00:58:07,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742088_1264 (size=451756) 2024-12-13T00:58:07,818 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742089_1265 (size=503880) 2024-12-13T00:58:07,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742089_1265 (size=503880) 2024-12-13T00:58:07,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742089_1265 (size=503880) 2024-12-13T00:58:07,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T00:58:07,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T00:58:07,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742090_1266 (size=4695811) 2024-12-13T00:58:07,867 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T00:58:07,870 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-13T00:58:07,872 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:58:07,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742091_1267 (size=338) 2024-12-13T00:58:07,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742091_1267 (size=338) 2024-12-13T00:58:07,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742091_1267 (size=338) 2024-12-13T00:58:07,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742092_1268 (size=15) 2024-12-13T00:58:07,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742092_1268 (size=15) 2024-12-13T00:58:07,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742092_1268 (size=15) 2024-12-13T00:58:07,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742093_1269 (size=304934) 2024-12-13T00:58:07,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742093_1269 (size=304934) 2024-12-13T00:58:07,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742093_1269 (size=304934) 2024-12-13T00:58:10,111 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:58:10,112 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:58:10,115 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0004_000001 (auth:SIMPLE) from 127.0.0.1:44888 2024-12-13T00:58:10,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000001/launch_container.sh] 2024-12-13T00:58:10,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000001/container_tokens] 2024-12-13T00:58:10,134 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0004/container_1734051393214_0004_01_000001/sysfs] 2024-12-13T00:58:10,996 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0005_000001 (auth:SIMPLE) from 127.0.0.1:41084 2024-12-13T00:58:17,212 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0005_000001 (auth:SIMPLE) from 127.0.0.1:34946 2024-12-13T00:58:17,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742094_1270 (size=350608) 2024-12-13T00:58:17,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742094_1270 (size=350608) 2024-12-13T00:58:17,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742094_1270 (size=350608) 2024-12-13T00:58:19,591 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0005_000001 (auth:SIMPLE) from 127.0.0.1:57218 2024-12-13T00:58:22,772 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
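The entries above record the ExportSnapshot MapReduce job being prepared and launched for 'snaptb0-testConsecutiveExports': TableMapReduceUtil resolves each required class to a dependency jar that is shipped with the job (the "For class ..., using jar ..." lines), the job resources land in HDFS (the addStoredBlock reports), and the copy target is a local file:// directory. A hedged sketch only, showing how the same tool can be driven programmatically; the class and flag names follow ExportSnapshot's documented usage, and the destination path is a placeholder rather than the path from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot manifest and hfiles to a local filesystem root,
    // mirroring the "local-export-..." destination seen in this log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // placeholder destination
    });
    System.exit(rc);
  }
}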
2024-12-13T00:58:24,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742095_1271 (size=16925) 2024-12-13T00:58:24,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742095_1271 (size=16925) 2024-12-13T00:58:24,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742095_1271 (size=16925) 2024-12-13T00:58:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742096_1272 (size=462) 2024-12-13T00:58:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742096_1272 (size=462) 2024-12-13T00:58:24,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742096_1272 (size=462) 2024-12-13T00:58:24,251 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000002/launch_container.sh] 2024-12-13T00:58:24,251 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000002/container_tokens] 2024-12-13T00:58:24,252 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000002/sysfs] 2024-12-13T00:58:24,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742097_1273 (size=16925) 2024-12-13T00:58:24,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742097_1273 (size=16925) 2024-12-13T00:58:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742097_1273 (size=16925) 2024-12-13T00:58:24,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742098_1274 (size=350608) 2024-12-13T00:58:24,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742098_1274 (size=350608) 2024-12-13T00:58:24,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742098_1274 (size=350608) 2024-12-13T00:58:24,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0005_000001 (auth:SIMPLE) from 127.0.0.1:39038 2024-12-13T00:58:26,208 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:58:26,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T00:58:26,213 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-13T00:58:26,213 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:58:26,213 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:58:26,213 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T00:58:26,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T00:58:26,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T00:58:26,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@6d668592 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-13T00:58:26,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-13T00:58:26,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051466862/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-13T00:58:26,240 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-13T00:58:26,241 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-13T00:58:26,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure 
table=testtb-testConsecutiveExports 2024-12-13T00:58:26,245 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051506244"}]},"ts":"1734051506244"} 2024-12-13T00:58:26,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T00:58:26,247 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-13T00:58:26,250 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-13T00:58:26,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-13T00:58:26,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, UNASSIGN}] 2024-12-13T00:58:26,253 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, UNASSIGN 2024-12-13T00:58:26,254 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, UNASSIGN 2024-12-13T00:58:26,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=9fbedc17e1e6db0c89da7568df206c9d, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:26,254 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=58b820ea3d545af749e330a65348a6c9, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:26,256 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:26,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=94, state=RUNNABLE; CloseRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:26,256 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:26,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; CloseRegionProcedure 58b820ea3d545af749e330a65348a6c9, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:58:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T00:58:26,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 
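The disable request above ("Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports", stored as DisableTableProcedure pid=92) is the server-side effect of a single Admin call. A minimal client-side sketch, assuming a standard HBase 2.x client and a reachable cluster configuration; only the table name is taken from the log, everything else is placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      if (!admin.isTableDisabled(tn)) {
        // Blocks until the master's DisableTableProcedure (and the region close
        // sub-procedures recorded in the following entries) completes.
        admin.disableTable(tn);
      }
    }
  }
}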
2024-12-13T00:58:26,408 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:26,409 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:58:26,409 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 9fbedc17e1e6db0c89da7568df206c9d, disabling compactions & flushes 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 58b820ea3d545af749e330a65348a6c9, disabling compactions & flushes 2024-12-13T00:58:26,410 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:58:26,410 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. after waiting 0 ms 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. after waiting 0 ms 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:58:26,410 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 
2024-12-13T00:58:26,420 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:58:26,420 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:58:26,421 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:26,421 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:26,421 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d. 2024-12-13T00:58:26,421 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9. 2024-12-13T00:58:26,421 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 9fbedc17e1e6db0c89da7568df206c9d: 2024-12-13T00:58:26,421 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 58b820ea3d545af749e330a65348a6c9: 2024-12-13T00:58:26,425 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=58b820ea3d545af749e330a65348a6c9, regionState=CLOSED 2024-12-13T00:58:26,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=9fbedc17e1e6db0c89da7568df206c9d, regionState=CLOSED 2024-12-13T00:58:26,427 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 58b820ea3d545af749e330a65348a6c9 2024-12-13T00:58:26,428 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:58:26,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-12-13T00:58:26,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; CloseRegionProcedure 58b820ea3d545af749e330a65348a6c9, server=9812beb1e4cd,40687,1734051385054 in 176 msec 2024-12-13T00:58:26,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=94 2024-12-13T00:58:26,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=94, state=SUCCESS; CloseRegionProcedure 9fbedc17e1e6db0c89da7568df206c9d, server=9812beb1e4cd,45231,1734051384968 in 178 msec 2024-12-13T00:58:26,441 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=58b820ea3d545af749e330a65348a6c9, UNASSIGN in 186 msec 2024-12-13T00:58:26,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-13T00:58:26,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9fbedc17e1e6db0c89da7568df206c9d, UNASSIGN in 188 msec 2024-12-13T00:58:26,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-13T00:58:26,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 193 msec 2024-12-13T00:58:26,447 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051506447"}]},"ts":"1734051506447"} 2024-12-13T00:58:26,451 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-13T00:58:26,453 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-13T00:58:26,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 213 msec 2024-12-13T00:58:26,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-13T00:58:26,550 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-13T00:58:26,551 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-13T00:58:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,559 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-13T00:58:26,560 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-13T00:58:26,564 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:58:26,565 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,566 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T00:58:26,566 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T00:58:26,566 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T00:58:26,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-13T00:58:26,569 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9 2024-12-13T00:58:26,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:26,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:26,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, 
quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:26,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-13T00:58:26,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:26,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T00:58:26,576 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/recovered.edits] 2024-12-13T00:58:26,576 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/recovered.edits] 2024-12-13T00:58:26,585 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/cf/4f19942efac348ebb09cd531da5847df 2024-12-13T00:58:26,594 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d/recovered.edits/9.seqid 2024-12-13T00:58:26,594 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/9fbedc17e1e6db0c89da7568df206c9d 2024-12-13T00:58:26,596 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/cf/ce3b104a4e4844d6a287844d00e6d347 2024-12-13T00:58:26,633 DEBUG [HFileArchiver-14 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9/recovered.edits/9.seqid 2024-12-13T00:58:26,634 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testConsecutiveExports/58b820ea3d545af749e330a65348a6c9 2024-12-13T00:58:26,634 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-13T00:58:26,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,642 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-13T00:58:26,645 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-13T00:58:26,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-13T00:58:26,646 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051506646"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:26,646 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051506646"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:26,653 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:58:26,653 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9fbedc17e1e6db0c89da7568df206c9d, NAME => 'testtb-testConsecutiveExports,,1734051464323.9fbedc17e1e6db0c89da7568df206c9d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 58b820ea3d545af749e330a65348a6c9, NAME => 'testtb-testConsecutiveExports,1,1734051464323.58b820ea3d545af749e330a65348a6c9.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:58:26,653 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
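Once the regions are closed and their files archived (the HFileArchiver entries above), DeleteTableProcedure removes the table from hbase:meta; the snapshot deletions recorded in the next entries are separate Admin requests. A hedged client-side sketch under the same assumptions as the previous one, with the table and snapshot names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      admin.deleteTable(tn);  // the table must already be disabled
      // Snapshot cleanup, matching the 'delete name: "..."' requests in the log.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}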
2024-12-13T00:58:26,654 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051506653"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:26,658 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-13T00:58:26,665 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-13T00:58:26,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 114 msec 2024-12-13T00:58:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-13T00:58:26,675 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-13T00:58:26,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-13T00:58:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-13T00:58:26,713 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-13T00:58:26,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-13T00:58:26,764 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=784 (was 784), OpenFileDescriptor=771 (was 772), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=698 (was 571) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=3883 (was 4315) 2024-12-13T00:58:26,764 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-13T00:58:26,787 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=784, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=698, ProcessCount=20, AvailableMemoryMB=3881 2024-12-13T00:58:26,788 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-13T00:58:26,790 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:58:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:26,793 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:58:26,793 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:26,793 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-13T00:58:26,794 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:58:26,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T00:58:26,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742099_1275 (size=422) 2024-12-13T00:58:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742099_1275 (size=422) 2024-12-13T00:58:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742099_1275 (size=422) 2024-12-13T00:58:26,829 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7291031b7331870285a02ea61c6bc826, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:26,840 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => e6cf1838296a4d970f499ee9f19b4ba1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:26,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742100_1276 (size=83) 2024-12-13T00:58:26,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742100_1276 (size=83) 2024-12-13T00:58:26,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742100_1276 (size=83) 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 7291031b7331870285a02ea61c6bc826, disabling compactions & flushes 2024-12-13T00:58:26,876 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. after waiting 0 ms 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
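The CreateTableProcedure above, with its descriptor (a single 'cf' family, REGION_REPLICATION '1', and two regions split at '1'), is roughly what the following client call would produce. This is a sketch against the standard HBase 2.x builder API, reusing the conn/admin objects from the earlier sketch; it is not the test's own code.

// Sketch of a createTable call matching the descriptor logged above; conn/admin as in the earlier sketch.
// Additional classes assumed: org.apache.hadoop.hbase.client.{TableDescriptor, TableDescriptorBuilder,
// ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder}, org.apache.hadoop.hbase.regionserver.BloomType,
// org.apache.hadoop.hbase.util.Bytes.
TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
    .setMaxVersions(1)                       // VERSIONS => '1'
    .setBloomFilterType(BloomType.ROW)       // BLOOMFILTER => 'ROW'
    .setBlocksize(65536)                     // BLOCKSIZE => '65536'
    .build();
TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
    .setRegionReplication(1)                 // REGION_REPLICATION => '1'
    .setColumnFamily(cf)
    .build();
byte[][] splitKeys = { Bytes.toBytes("1") }; // gives the ('', '1') and ('1', '') regions created above
admin.createTable(desc, splitKeys);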
2024-12-13T00:58:26,876 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:26,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7291031b7331870285a02ea61c6bc826: 2024-12-13T00:58:26,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742101_1277 (size=83) 2024-12-13T00:58:26,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742101_1277 (size=83) 2024-12-13T00:58:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742101_1277 (size=83) 2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing e6cf1838296a4d970f499ee9f19b4ba1, disabling compactions & flushes 2024-12-13T00:58:26,882 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. after waiting 0 ms 2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:26,882 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
2024-12-13T00:58:26,882 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for e6cf1838296a4d970f499ee9f19b4ba1: 2024-12-13T00:58:26,884 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:58:26,884 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734051506884"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051506884"}]},"ts":"1734051506884"} 2024-12-13T00:58:26,884 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734051506884"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051506884"}]},"ts":"1734051506884"} 2024-12-13T00:58:26,890 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:58:26,891 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:58:26,891 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051506891"}]},"ts":"1734051506891"} 2024-12-13T00:58:26,894 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-13T00:58:26,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T00:58:26,897 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:58:26,899 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:58:26,899 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:58:26,899 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:58:26,899 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:58:26,899 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:58:26,899 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:58:26,899 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:58:26,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, ASSIGN}] 2024-12-13T00:58:26,901 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, ASSIGN 2024-12-13T00:58:26,901 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, ASSIGN 2024-12-13T00:58:26,902 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:58:26,902 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:58:27,053 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:58:27,053 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7291031b7331870285a02ea61c6bc826, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:27,058 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=e6cf1838296a4d970f499ee9f19b4ba1, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:27,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:27,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=100, state=RUNNABLE; OpenRegionProcedure 7291031b7331870285a02ea61c6bc826, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:58:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T00:58:27,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:27,224 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:27,228 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
2024-12-13T00:58:27,228 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => e6cf1838296a4d970f499ee9f19b4ba1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:58:27,229 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. service=AccessControlService 2024-12-13T00:58:27,229 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:58:27,229 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,229 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:27,229 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,229 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,264 INFO [StoreOpener-e6cf1838296a4d970f499ee9f19b4ba1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,267 INFO [StoreOpener-e6cf1838296a4d970f499ee9f19b4ba1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e6cf1838296a4d970f499ee9f19b4ba1 columnFamilyName cf 2024-12-13T00:58:27,267 DEBUG [StoreOpener-e6cf1838296a4d970f499ee9f19b4ba1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:27,268 INFO [StoreOpener-e6cf1838296a4d970f499ee9f19b4ba1-1 {}] regionserver.HStore(327): Store=e6cf1838296a4d970f499ee9f19b4ba1/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:27,269 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,270 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,279 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:27,279 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 7291031b7331870285a02ea61c6bc826, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. service=AccessControlService 2024-12-13T00:58:27,280 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,284 INFO [StoreOpener-7291031b7331870285a02ea61c6bc826-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,289 INFO [StoreOpener-7291031b7331870285a02ea61c6bc826-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7291031b7331870285a02ea61c6bc826 columnFamilyName cf 2024-12-13T00:58:27,289 DEBUG [StoreOpener-7291031b7331870285a02ea61c6bc826-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:27,290 INFO [StoreOpener-7291031b7331870285a02ea61c6bc826-1 {}] regionserver.HStore(327): Store=7291031b7331870285a02ea61c6bc826/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:27,291 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,292 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,295 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,300 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:27,301 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened e6cf1838296a4d970f499ee9f19b4ba1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64690286, jitterRate=-0.03603962063789368}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:27,302 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for e6cf1838296a4d970f499ee9f19b4ba1: 2024-12-13T00:58:27,303 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:27,303 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 7291031b7331870285a02ea61c6bc826; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64922975, jitterRate=-0.03257228434085846}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:27,303 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1., pid=102, masterSystemTime=1734051507222 2024-12-13T00:58:27,303 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 7291031b7331870285a02ea61c6bc826: 2024-12-13T00:58:27,305 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:27,305 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
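After the OpenRegionProcedures above complete, the test waits until every region of the new table is assigned. From an ordinary client the equivalent check is roughly the sketch below (same conn/admin/tn as in the earlier sketches; interrupt and exception handling omitted).

// Poll until both regions are open, then print where they landed (matches the ASSIGN records above).
// Additional classes assumed: org.apache.hadoop.hbase.HRegionLocation, org.apache.hadoop.hbase.client.RegionLocator.
while (!admin.isTableAvailable(tn)) {
  Thread.sleep(100);
}
try (RegionLocator locator = conn.getRegionLocator(tn)) {
  for (HRegionLocation loc : locator.getAllRegionLocations()) {
    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
  }
}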
2024-12-13T00:58:27,306 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=e6cf1838296a4d970f499ee9f19b4ba1, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:27,307 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826., pid=103, masterSystemTime=1734051507223 2024-12-13T00:58:27,312 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:27,313 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:27,313 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7291031b7331870285a02ea61c6bc826, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:27,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-13T00:58:27,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, ASSIGN in 418 msec 2024-12-13T00:58:27,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=100 2024-12-13T00:58:27,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=100, state=SUCCESS; OpenRegionProcedure 7291031b7331870285a02ea61c6bc826, server=9812beb1e4cd,46075,1734051384875 in 244 msec 2024-12-13T00:58:27,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1, server=9812beb1e4cd,45231,1734051384968 in 240 msec 2024-12-13T00:58:27,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-13T00:58:27,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, ASSIGN in 420 msec 2024-12-13T00:58:27,323 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:58:27,323 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051507323"}]},"ts":"1734051507323"} 2024-12-13T00:58:27,325 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-13T00:58:27,328 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, 
locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:58:27,329 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-13T00:58:27,333 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T00:58:27,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:27,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:27,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:27,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:27,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:27,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:27,341 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:27,341 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:27,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 550 msec 2024-12-13T00:58:27,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-13T00:58:27,399 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-13T00:58:27,399 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-13T00:58:27,400 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:27,425 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-13T00:58:27,426 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:27,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-13T00:58:27,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T00:58:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051507441 (current time:1734051507441). 
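Stepping back to the PermissionStorage and ZKPermissionWatcher records above: they show the table owner (jenkins) receiving RWXCA on the new table as part of the create. An explicit grant of the same shape, using the public AccessControlClient helper, would look roughly like this sketch (conn/tn as before; exceptions omitted).

// Sketch: explicit table-level grant equivalent to the owner ACL written above (RWXCA).
// Classes assumed: org.apache.hadoop.hbase.security.access.{AccessControlClient, Permission}.
AccessControlClient.grant(conn, tn, "jenkins", null, null,
    Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
    Permission.Action.CREATE, Permission.Action.ADMIN);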
2024-12-13T00:58:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:58:27,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-13T00:58:27,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:27,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4292bfd8 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f45b2e3 2024-12-13T00:58:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f70aa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:27,478 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4292bfd8 to 127.0.0.1:49424 2024-12-13T00:58:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:27,481 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-13T00:58:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65255b98 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@67f52f75 2024-12-13T00:58:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aeab31e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:27,516 DEBUG [hconnection-0x1a476b91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:27,519 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65255b98 to 127.0.0.1:49424 2024-12-13T00:58:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-13T00:58:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T00:58:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:58:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T00:58:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-13T00:58:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T00:58:27,544 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:27,546 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:27,549 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:27,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742102_1278 (size=215) 2024-12-13T00:58:27,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742102_1278 (size=215) 2024-12-13T00:58:27,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742102_1278 (size=215) 2024-12-13T00:58:27,589 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:27,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826}, {pid=106, ppid=104, 
state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1}] 2024-12-13T00:58:27,590 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,591 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T00:58:27,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:27,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:27,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-13T00:58:27,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-13T00:58:27,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:27,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 7291031b7331870285a02ea61c6bc826: 2024-12-13T00:58:27,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T00:58:27,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:27,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:58:27,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
2024-12-13T00:58:27,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for e6cf1838296a4d970f499ee9f19b4ba1: 2024-12-13T00:58:27,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T00:58:27,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:27,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:58:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742103_1279 (size=86) 2024-12-13T00:58:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742103_1279 (size=86) 2024-12-13T00:58:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742103_1279 (size=86) 2024-12-13T00:58:27,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
2024-12-13T00:58:27,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-13T00:58:27,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-13T00:58:27,828 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,828 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:27,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 in 241 msec 2024-12-13T00:58:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742104_1280 (size=86) 2024-12-13T00:58:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742104_1280 (size=86) 2024-12-13T00:58:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742104_1280 (size=86) 2024-12-13T00:58:27,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
2024-12-13T00:58:27,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-13T00:58:27,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-13T00:58:27,839 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,839 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:27,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=104 2024-12-13T00:58:27,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 in 252 msec 2024-12-13T00:58:27,843 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:27,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T00:58:27,848 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:27,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:27,850 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742105_1281 (size=597) 2024-12-13T00:58:27,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742105_1281 (size=597) 2024-12-13T00:58:27,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742105_1281 (size=597) 2024-12-13T00:58:27,912 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:27,919 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:27,921 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:27,928 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:27,928 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-13T00:58:27,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 395 msec 2024-12-13T00:58:28,070 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e6cf1838296a4d970f499ee9f19b4ba1 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:58:28,070 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 7291031b7331870285a02ea61c6bc826 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:58:28,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-13T00:58:28,148 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-13T00:58:28,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:58:28,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. with WAL disabled. Data may be lost in the event of a crash. 
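The FLUSH snapshot that finished above and the "with WAL disabled" writes that follow correspond, on the client side, to calls of roughly this shape (a sketch reusing conn/admin/tn from the earlier sketches; the row and values are hypothetical, and Durability.SKIP_WAL is what produces the data-loss warning in the log).

// Take a flush-type snapshot of the still-empty table, list snapshots to confirm it, then load a few cells
// without writing the WAL. Classes assumed: org.apache.hadoop.hbase.client.{Table, Put, Durability}.
admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", tn);
admin.listSnapshots().forEach(s -> System.out.println(s.getName()));

try (Table table = conn.getTable(tn)) {
  Put put = new Put(Bytes.toBytes("row1"));   // hypothetical row key, not taken from the log
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  put.setDurability(Durability.SKIP_WAL);     // regionserver logs "writing data ... with WAL disabled"
  table.put(put);
}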
2024-12-13T00:58:28,177 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,177 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:28,177 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:28,200 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T00:58:28,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051508200 (current time:1734051508200). 2024-12-13T00:58:28,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:58:28,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-13T00:58:28,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:28,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x144c2bbd to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3576d146 2024-12-13T00:58:28,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@680caae8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:28,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:28,214 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:28,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x144c2bbd to 127.0.0.1:49424 2024-12-13T00:58:28,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:28,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e863ec4 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@df932b3 2024-12-13T00:58:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f820e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:28,224 DEBUG [hconnection-0x6cbf2d0e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:28,225 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e863ec4 to 127.0.0.1:49424 2024-12-13T00:58:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-13T00:58:28,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:58:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-13T00:58:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-13T00:58:28,234 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:28,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T00:58:28,235 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:28,237 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:28,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742106_1282 (size=210) 2024-12-13T00:58:28,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742106_1282 (size=210) 2024-12-13T00:58:28,251 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742106_1282 (size=210) 2024-12-13T00:58:28,253 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:28,253 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1}] 2024-12-13T00:58:28,254 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:28,255 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T00:58:28,406 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:28,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:28,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-13T00:58:28,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-13T00:58:28,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:28,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
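Procedure pid=107 above is the master-side SnapshotProcedure for snaptb0-testExportFileSystemStateWithMergeRegion, fanned out to SnapshotRegionProcedure subprocedures 108 and 109. From a client's point of view this whole sequence is normally driven by a single Admin.snapshot(...) call; the following is a hedged sketch (class name and connection bootstrap are assumptions, not taken from the test) of taking an equivalent flush snapshot of an enabled table:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this requests a flush snapshot and blocks until the
      // master-side SnapshotProcedure (pid=107 above) reports completion.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}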
2024-12-13T00:58:28,408 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 7291031b7331870285a02ea61c6bc826 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T00:58:28,408 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing e6cf1838296a4d970f499ee9f19b4ba1 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T00:58:28,437 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/.tmp/cf/428ce3456fea4d7aa7066ee5ca32483d is 69, key is 0e651a49259ee416a9903e8800ab44f90/cf:q/1734051508158/Put/seqid=0 2024-12-13T00:58:28,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/.tmp/cf/a07ef0a7d7794f12b3f3d8017dcab06e is 71, key is 112f1b91baeab5371e7edbf368d7081f/cf:q/1734051508167/Put/seqid=0 2024-12-13T00:58:28,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742107_1283 (size=5149) 2024-12-13T00:58:28,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742107_1283 (size=5149) 2024-12-13T00:58:28,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742107_1283 (size=5149) 2024-12-13T00:58:28,483 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/.tmp/cf/428ce3456fea4d7aa7066ee5ca32483d 2024-12-13T00:58:28,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/.tmp/cf/428ce3456fea4d7aa7066ee5ca32483d as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d 2024-12-13T00:58:28,513 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d, entries=1, sequenceid=6, filesize=5.0 K 2024-12-13T00:58:28,514 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 7291031b7331870285a02ea61c6bc826 in 106ms, sequenceid=6, compaction requested=false 2024-12-13T00:58:28,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 7291031b7331870285a02ea61c6bc826: 2024-12-13T00:58:28,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T00:58:28,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:28,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d] hfiles 2024-12-13T00:58:28,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742108_1284 (size=8460) 2024-12-13T00:58:28,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742108_1284 (size=8460) 2024-12-13T00:58:28,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742108_1284 (size=8460) 2024-12-13T00:58:28,529 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/.tmp/cf/a07ef0a7d7794f12b3f3d8017dcab06e 2024-12-13T00:58:28,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T00:58:28,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/.tmp/cf/a07ef0a7d7794f12b3f3d8017dcab06e as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e 2024-12-13T00:58:28,550 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e, entries=49, sequenceid=6, filesize=8.3 K 2024-12-13T00:58:28,552 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for e6cf1838296a4d970f499ee9f19b4ba1 in 143ms, sequenceid=6, compaction requested=false 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for e6cf1838296a4d970f499ee9f19b4ba1: 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e] hfiles 2024-12-13T00:58:28,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742109_1285 (size=125) 2024-12-13T00:58:28,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742109_1285 (size=125) 2024-12-13T00:58:28,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742109_1285 (size=125) 2024-12-13T00:58:28,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
2024-12-13T00:58:28,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-13T00:58:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-13T00:58:28,616 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:28,616 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:28,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 7291031b7331870285a02ea61c6bc826 in 365 msec 2024-12-13T00:58:28,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742110_1286 (size=125) 2024-12-13T00:58:28,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742110_1286 (size=125) 2024-12-13T00:58:28,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742110_1286 (size=125) 2024-12-13T00:58:28,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 
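The recurring "Checking to see if procedure is done pid=107" entries are the client polling the master until the snapshot procedure finishes. Once it reports SUCCESS, the completed snapshot should be visible via Admin.listSnapshots(); the sketch below (class name and connection setup assumed, not part of the test code) only illustrates that check:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the procedure finishes, the completed snapshot is listed by the master.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        if ("snaptb0-testExportFileSystemStateWithMergeRegion".equals(sd.getName())) {
          System.out.println("snapshot present: " + sd.getName());
        }
      }
    }
  }
}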
2024-12-13T00:58:28,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-13T00:58:28,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-13T00:58:28,632 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:28,632 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:28,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-13T00:58:28,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1 in 381 msec 2024-12-13T00:58:28,636 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:28,637 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:28,638 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:28,638 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,639 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742111_1287 (size=675) 2024-12-13T00:58:28,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742111_1287 (size=675) 2024-12-13T00:58:28,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742111_1287 (size=675) 2024-12-13T00:58:28,682 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:28,689 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:28,692 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:28,694 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:28,694 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-13T00:58:28,697 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 462 msec 2024-12-13T00:58:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-13T00:58:28,839 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-13T00:58:28,861 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:58:28,863 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:58:28,865 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T00:58:28,866 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:58:28,868 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:58:28,868 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T00:58:28,869 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T00:58:28,877 INFO [RS-EventLoopGroup-3-1 
{}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T00:58:28,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-13T00:58:28,880 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:58:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:28,882 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:58:28,882 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:28,883 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-13T00:58:28,883 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:58:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T00:58:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742112_1288 (size=399) 2024-12-13T00:58:28,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742112_1288 (size=399) 2024-12-13T00:58:28,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742112_1288 (size=399) 2024-12-13T00:58:28,905 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 276a185bd9b5f92320d03779894725c3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:28,906 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 46bb93906bd0c3ead0ff86e35c5ea599, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:28,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742113_1289 (size=85) 2024-12-13T00:58:28,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742113_1289 (size=85) 2024-12-13T00:58:28,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742113_1289 (size=85) 2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing 276a185bd9b5f92320d03779894725c3, disabling compactions & flushes 2024-12-13T00:58:28,927 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. after waiting 0 ms 2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:28,927 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 
2024-12-13T00:58:28,927 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for 276a185bd9b5f92320d03779894725c3: 2024-12-13T00:58:28,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742114_1290 (size=85) 2024-12-13T00:58:28,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742114_1290 (size=85) 2024-12-13T00:58:28,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742114_1290 (size=85) 2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 46bb93906bd0c3ead0ff86e35c5ea599, disabling compactions & flushes 2024-12-13T00:58:28,930 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. after waiting 0 ms 2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:28,930 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 
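Procedure pid=110 creates testtb-testExportFileSystemStateWithMergeRegion-1 with a single 'cf' family (VERSIONS => '1') and two regions split at key '2', matching the STARTKEY/ENDKEY pairs instantiated above. A hedged client-side equivalent (class name and connection bootstrap assumed; the descriptor is reduced to the attributes that differ from defaults) could look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)               // VERSIONS => '1' as in the descriptor above
              .build())
          .build();
      // A single split key '2' produces the two regions seen in the log:
      // ['', '2') and ['2', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
    }
  }
}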
2024-12-13T00:58:28,930 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 46bb93906bd0c3ead0ff86e35c5ea599: 2024-12-13T00:58:28,931 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:58:28,932 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734051508931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051508931"}]},"ts":"1734051508931"} 2024-12-13T00:58:28,932 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734051508931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051508931"}]},"ts":"1734051508931"} 2024-12-13T00:58:28,935 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:58:28,935 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:58:28,935 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051508935"}]},"ts":"1734051508935"} 2024-12-13T00:58:28,937 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-13T00:58:28,941 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:58:28,943 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:58:28,943 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:58:28,943 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:58:28,943 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:58:28,943 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:58:28,943 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:58:28,943 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:58:28,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, ASSIGN}] 2024-12-13T00:58:28,944 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, ASSIGN 2024-12-13T00:58:28,944 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, ASSIGN 2024-12-13T00:58:28,945 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:58:28,946 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:58:28,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T00:58:29,096 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:58:29,096 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=46bb93906bd0c3ead0ff86e35c5ea599, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:29,096 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=276a185bd9b5f92320d03779894725c3, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE; OpenRegionProcedure 276a185bd9b5f92320d03779894725c3, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:29,099 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE; OpenRegionProcedure 46bb93906bd0c3ead0ff86e35c5ea599, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:58:29,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T00:58:29,251 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,254 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 
2024-12-13T00:58:29,254 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 276a185bd9b5f92320d03779894725c3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.', STARTKEY => '', ENDKEY => '2'} 2024-12-13T00:58:29,255 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. service=AccessControlService 2024-12-13T00:58:29,255 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:58:29,255 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,255 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:29,255 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,255 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,257 INFO [StoreOpener-276a185bd9b5f92320d03779894725c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,258 INFO [StoreOpener-276a185bd9b5f92320d03779894725c3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 276a185bd9b5f92320d03779894725c3 columnFamilyName cf 2024-12-13T00:58:29,258 DEBUG [StoreOpener-276a185bd9b5f92320d03779894725c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:29,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 
2024-12-13T00:58:29,259 INFO [StoreOpener-276a185bd9b5f92320d03779894725c3-1 {}] regionserver.HStore(327): Store=276a185bd9b5f92320d03779894725c3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:29,268 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,269 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,270 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:29,271 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => 46bb93906bd0c3ead0ff86e35c5ea599, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.', STARTKEY => '2', ENDKEY => ''} 2024-12-13T00:58:29,271 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. service=AccessControlService 2024-12-13T00:58:29,271 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:58:29,271 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,271 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:29,272 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,272 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,274 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,278 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:29,279 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 276a185bd9b5f92320d03779894725c3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64285361, jitterRate=-0.04207347333431244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:29,280 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 276a185bd9b5f92320d03779894725c3: 2024-12-13T00:58:29,281 INFO [StoreOpener-46bb93906bd0c3ead0ff86e35c5ea599-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,281 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3., pid=113, masterSystemTime=1734051509251 2024-12-13T00:58:29,283 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:29,283 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 
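Subprocedures 111-114 above assign and open the two new regions on 9812beb1e4cd,45231 and 9812beb1e4cd,40687. As an illustrative check only (class name and connection setup assumed, not taken from the test), a client can confirm where each region landed through a RegionLocator:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
      // Each location pairs an encoded region name with its hosting server,
      // e.g. the two OpenRegionProcedure targets above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}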
2024-12-13T00:58:29,284 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=276a185bd9b5f92320d03779894725c3, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,287 INFO [StoreOpener-46bb93906bd0c3ead0ff86e35c5ea599-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46bb93906bd0c3ead0ff86e35c5ea599 columnFamilyName cf 2024-12-13T00:58:29,287 DEBUG [StoreOpener-46bb93906bd0c3ead0ff86e35c5ea599-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:29,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=111 2024-12-13T00:58:29,288 INFO [StoreOpener-46bb93906bd0c3ead0ff86e35c5ea599-1 {}] regionserver.HStore(327): Store=46bb93906bd0c3ead0ff86e35c5ea599/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:29,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=111, state=SUCCESS; OpenRegionProcedure 276a185bd9b5f92320d03779894725c3, server=9812beb1e4cd,45231,1734051384968 in 188 msec 2024-12-13T00:58:29,289 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,289 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, ASSIGN in 345 msec 2024-12-13T00:58:29,292 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,309 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:29,310 INFO 
[RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened 46bb93906bd0c3ead0ff86e35c5ea599; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72477882, jitterRate=0.08000460267066956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:29,310 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for 46bb93906bd0c3ead0ff86e35c5ea599: 2024-12-13T00:58:29,311 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599., pid=114, masterSystemTime=1734051509259 2024-12-13T00:58:29,313 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:29,313 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:29,313 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=46bb93906bd0c3ead0ff86e35c5ea599, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:29,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=112 2024-12-13T00:58:29,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=112, state=SUCCESS; OpenRegionProcedure 46bb93906bd0c3ead0ff86e35c5ea599, server=9812beb1e4cd,40687,1734051385054 in 217 msec 2024-12-13T00:58:29,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-13T00:58:29,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, ASSIGN in 375 msec 2024-12-13T00:58:29,321 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:58:29,321 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051509321"}]},"ts":"1734051509321"} 2024-12-13T00:58:29,323 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-13T00:58:29,325 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:58:29,326 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey 
testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-13T00:58:29,328 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-13T00:58:29,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:29,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:29,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:29,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,332 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,333 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:29,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 453 msec 2024-12-13T00:58:29,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-13T00:58:29,489 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-13T00:58:29,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599] 2024-12-13T00:58:29,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599], force=true 2024-12-13T00:58:29,515 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599], force=true 2024-12-13T00:58:29,516 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599], force=true 2024-12-13T00:58:29,516 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599], force=true 2024-12-13T00:58:29,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T00:58:29,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, UNASSIGN}] 2024-12-13T00:58:29,532 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
region=276a185bd9b5f92320d03779894725c3, UNASSIGN 2024-12-13T00:58:29,532 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, UNASSIGN 2024-12-13T00:58:29,532 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=276a185bd9b5f92320d03779894725c3, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=46bb93906bd0c3ead0ff86e35c5ea599, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:29,534 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:29,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure 276a185bd9b5f92320d03779894725c3, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:29,535 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:29,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure 46bb93906bd0c3ead0ff86e35c5ea599, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:58:29,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T00:58:29,686 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing 276a185bd9b5f92320d03779894725c3, disabling compactions & flushes 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 46bb93906bd0c3ead0ff86e35c5ea599, disabling compactions & flushes 2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 
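The MergeTableRegionsProcedure (pid=115) driving the UNASSIGN/close activity above was stored in response to the client request "merge regions [276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599]". A minimal sketch, assuming a standard client Connection, of the Admin call that issues such a request; force=true in the log corresponds to the forcible flag, and everything apart from the table name is illustrative:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The log shows exactly two regions for this table at this point.
      List<RegionInfo> regions = admin.getRegions(table);
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // forcible=true allows merging regions that are not adjacent,
      // matching "force=true" in the stored procedure above.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}
```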
2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. after waiting 0 ms 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. after waiting 0 ms 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 2024-12-13T00:58:29,687 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 
2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 46bb93906bd0c3ead0ff86e35c5ea599 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-13T00:58:29,687 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing 276a185bd9b5f92320d03779894725c3 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-13T00:58:29,704 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/.tmp/cf/b11692776f4e4658ba6af97c200de1f8 is 28, key is 2/cf:/1734051509496/Put/seqid=0 2024-12-13T00:58:29,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742115_1291 (size=4945) 2024-12-13T00:58:29,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742115_1291 (size=4945) 2024-12-13T00:58:29,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742115_1291 (size=4945) 2024-12-13T00:58:29,711 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/.tmp/cf/b11692776f4e4658ba6af97c200de1f8 2024-12-13T00:58:29,711 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/.tmp/cf/cfc711eac74642148e2be077e25fe20f is 28, key is 1/cf:/1734051509492/Put/seqid=0 2024-12-13T00:58:29,718 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/.tmp/cf/b11692776f4e4658ba6af97c200de1f8 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8 2024-12-13T00:58:29,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742116_1292 (size=4945) 2024-12-13T00:58:29,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742116_1292 (size=4945) 2024-12-13T00:58:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742116_1292 (size=4945) 2024-12-13T00:58:29,720 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/.tmp/cf/cfc711eac74642148e2be077e25fe20f 2024-12-13T00:58:29,725 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8, entries=1, sequenceid=5, filesize=4.8 K 2024-12-13T00:58:29,726 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/.tmp/cf/cfc711eac74642148e2be077e25fe20f as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f 2024-12-13T00:58:29,727 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 46bb93906bd0c3ead0ff86e35c5ea599 in 40ms, sequenceid=5, compaction requested=false 2024-12-13T00:58:29,727 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-13T00:58:29,732 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f, entries=1, sequenceid=5, filesize=4.8 K 2024-12-13T00:58:29,732 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 276a185bd9b5f92320d03779894725c3 in 45ms, sequenceid=5, compaction requested=false 2024-12-13T00:58:29,733 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T00:58:29,734 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:29,734 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599. 
2024-12-13T00:58:29,734 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 46bb93906bd0c3ead0ff86e35c5ea599: 2024-12-13T00:58:29,735 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:29,736 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=46bb93906bd0c3ead0ff86e35c5ea599, regionState=CLOSED 2024-12-13T00:58:29,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117 2024-12-13T00:58:29,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure 46bb93906bd0c3ead0ff86e35c5ea599, server=9812beb1e4cd,40687,1734051385054 in 202 msec 2024-12-13T00:58:29,739 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T00:58:29,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=46bb93906bd0c3ead0ff86e35c5ea599, UNASSIGN in 208 msec 2024-12-13T00:58:29,740 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:29,740 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3. 
2024-12-13T00:58:29,740 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for 276a185bd9b5f92320d03779894725c3: 2024-12-13T00:58:29,741 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed 276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:29,742 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=276a185bd9b5f92320d03779894725c3, regionState=CLOSED 2024-12-13T00:58:29,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116 2024-12-13T00:58:29,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure 276a185bd9b5f92320d03779894725c3, server=9812beb1e4cd,45231,1734051384968 in 209 msec 2024-12-13T00:58:29,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-13T00:58:29,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=276a185bd9b5f92320d03779894725c3, UNASSIGN in 213 msec 2024-12-13T00:58:29,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742117_1293 (size=84) 2024-12-13T00:58:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742117_1293 (size=84) 2024-12-13T00:58:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742117_1293 (size=84) 2024-12-13T00:58:29,766 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742118_1294 (size=20) 2024-12-13T00:58:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742118_1294 (size=20) 2024-12-13T00:58:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742118_1294 (size=20) 2024-12-13T00:58:29,781 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:29,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742119_1295 (size=21) 2024-12-13T00:58:29,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742119_1295 (size=21) 2024-12-13T00:58:29,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742119_1295 (size=21) 2024-12-13T00:58:29,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742120_1296 (size=84) 2024-12-13T00:58:29,795 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742120_1296 (size=84) 2024-12-13T00:58:29,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742120_1296 (size=84) 2024-12-13T00:58:29,797 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:29,807 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-13T00:58:29,809 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508880.276a185bd9b5f92320d03779894725c3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:29,809 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734051508880.46bb93906bd0c3ead0ff86e35c5ea599.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:29,809 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:29,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T00:58:29,841 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, ASSIGN}] 2024-12-13T00:58:29,842 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, ASSIGN 2024-12-13T00:58:29,843 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, ASSIGN; state=MERGED, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:58:29,993 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
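The meta Put above records the merged region b0972bb6f077a6b1eb2abc41db866af8 with merge0000/merge0001 qualifiers in the info family pointing back at the two parent regions. A sketch, under the assumption that reading hbase:meta directly is acceptable here, of how those qualifiers could be inspected; the row key is copied from the log, and RegionInfo.parseFromOrNull is assumed to be available as in the 2.x client API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class InspectMergeQualifiers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Row key of the merged region, exactly as it appears in the meta Put above.
    byte[] row = Bytes.toBytes(
        "testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881."
            + "b0972bb6f077a6b1eb2abc41db866af8.");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.valueOf("hbase:meta"))) {
      Result r = meta.get(new Get(row).addFamily(Bytes.toBytes("info")));
      // merge0000/merge0001 hold serialized RegionInfo for the two parents.
      RegionInfo parentA = RegionInfo.parseFromOrNull(
          r.getValue(Bytes.toBytes("info"), Bytes.toBytes("merge0000")));
      RegionInfo parentB = RegionInfo.parseFromOrNull(
          r.getValue(Bytes.toBytes("info"), Bytes.toBytes("merge0001")));
      System.out.println(parentA + " + " + parentB);
    }
  }
}
```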
2024-12-13T00:58:29,994 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=b0972bb6f077a6b1eb2abc41db866af8, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:29,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure b0972bb6f077a6b1eb2abc41db866af8, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:30,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T00:58:30,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:30,151 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:30,151 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => b0972bb6f077a6b1eb2abc41db866af8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.', STARTKEY => '', ENDKEY => ''} 2024-12-13T00:58:30,151 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. service=AccessControlService 2024-12-13T00:58:30,151 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:58:30,152 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,152 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:30,152 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,152 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,153 INFO [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,154 INFO [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0972bb6f077a6b1eb2abc41db866af8 columnFamilyName cf 2024-12-13T00:58:30,154 DEBUG [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:30,173 DEBUG [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/b11692776f4e4658ba6af97c200de1f8.46bb93906bd0c3ead0ff86e35c5ea599->hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8-top 2024-12-13T00:58:30,179 DEBUG [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/cfc711eac74642148e2be077e25fe20f.276a185bd9b5f92320d03779894725c3->hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f-top 2024-12-13T00:58:30,180 INFO [StoreOpener-b0972bb6f077a6b1eb2abc41db866af8-1 {}] regionserver.HStore(327): Store=b0972bb6f077a6b1eb2abc41db866af8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:30,181 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,182 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,185 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,185 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened b0972bb6f077a6b1eb2abc41db866af8; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62442876, jitterRate=-0.06952863931655884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:30,186 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for b0972bb6f077a6b1eb2abc41db866af8: 2024-12-13T00:58:30,187 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8., pid=121, masterSystemTime=1734051510147 2024-12-13T00:58:30,187 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.,because compaction is disabled. 2024-12-13T00:58:30,189 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:30,189 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 
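The merged region opened above serves the full key range through reference files ("-top" links) back to the parents' store files. A small assumed verification scan, not part of the test itself, checking that the single region now returns both rows flushed earlier (row "1" from 276a185bd9b5f92320d03779894725c3 and row "2" from 46bb93906bd0c3ead0ff86e35c5ea599, per the flush messages above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyMergedTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result r : scanner) {
        // Expect rows "1" and "2", each with a single cell in family "cf".
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}
```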
2024-12-13T00:58:30,190 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=b0972bb6f077a6b1eb2abc41db866af8, regionState=OPEN, openSeqNum=9, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:30,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-13T00:58:30,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure b0972bb6f077a6b1eb2abc41db866af8, server=9812beb1e4cd,45231,1734051384968 in 196 msec 2024-12-13T00:58:30,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-13T00:58:30,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, ASSIGN in 351 msec 2024-12-13T00:58:30,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[276a185bd9b5f92320d03779894725c3, 46bb93906bd0c3ead0ff86e35c5ea599], force=true in 683 msec 2024-12-13T00:58:30,388 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0005_000001 (auth:SIMPLE) from 127.0.0.1:39046 2024-12-13T00:58:30,400 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000001/launch_container.sh] 2024-12-13T00:58:30,400 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000001/container_tokens] 2024-12-13T00:58:30,400 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_3/usercache/jenkins/appcache/application_1734051393214_0005/container_1734051393214_0005_01_000001/sysfs] 2024-12-13T00:58:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-13T00:58:30,621 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-13T00:58:30,622 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-13T00:58:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051510622 (current time:1734051510622). 2024-12-13T00:58:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:58:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-13T00:58:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x276b7dbc to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12dbd193 2024-12-13T00:58:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@693242e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:30,629 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x276b7dbc to 127.0.0.1:49424 2024-12-13T00:58:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x200a4a24 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c1150ff 2024-12-13T00:58:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174f3adf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:30,635 DEBUG [hconnection-0x49b59e2f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:30,636 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x200a4a24 to 
127.0.0.1:49424 2024-12-13T00:58:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-13T00:58:30,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:58:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-13T00:58:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-13T00:58:30,641 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:30,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T00:58:30,642 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:30,644 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:30,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742121_1297 (size=216) 2024-12-13T00:58:30,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742121_1297 (size=216) 2024-12-13T00:58:30,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742121_1297 (size=216) 2024-12-13T00:58:30,654 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:30,654 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b0972bb6f077a6b1eb2abc41db866af8}] 2024-12-13T00:58:30,655 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T00:58:30,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:30,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-13T00:58:30,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:30,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for b0972bb6f077a6b1eb2abc41db866af8: 2024-12-13T00:58:30,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-13T00:58:30,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:30,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/b11692776f4e4658ba6af97c200de1f8.46bb93906bd0c3ead0ff86e35c5ea599->hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8-top, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/cfc711eac74642148e2be077e25fe20f.276a185bd9b5f92320d03779894725c3->hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f-top] hfiles 2024-12-13T00:58:30,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/b11692776f4e4658ba6af97c200de1f8.46bb93906bd0c3ead0ff86e35c5ea599 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/cfc711eac74642148e2be077e25fe20f.276a185bd9b5f92320d03779894725c3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742122_1298 (size=269) 2024-12-13T00:58:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742122_1298 (size=269) 2024-12-13T00:58:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742122_1298 (size=269) 2024-12-13T00:58:30,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 
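The references above are captured for snaptb0-testExportFileSystemStateWithMergeRegion-1, the FLUSH-type snapshot requested a few lines earlier. A minimal sketch, assuming a standard client Connection, of the Admin.snapshot call that triggers this SnapshotProcedure (pid=122); the snapshot and table names come from the log, the rest is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH asks region servers to flush before store file references
      // are captured, matching "type=FLUSH ttl=0" in the request above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}
```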
2024-12-13T00:58:30,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-13T00:58:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-13T00:58:30,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,818 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:30,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-13T00:58:30,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure b0972bb6f077a6b1eb2abc41db866af8 in 165 msec 2024-12-13T00:58:30,820 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:30,821 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:30,821 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:30,821 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,822 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742123_1299 (size=670) 2024-12-13T00:58:30,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742123_1299 (size=670) 2024-12-13T00:58:30,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742123_1299 (size=670) 2024-12-13T00:58:30,837 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:30,843 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:30,843 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,844 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:30,845 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-13T00:58:30,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 206 msec 2024-12-13T00:58:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-13T00:58:30,944 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-13T00:58:30,944 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944 2024-12-13T00:58:30,944 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:30,979 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:30,979 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:30,997 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:58:31,002 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:31,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742124_1300 (size=216) 2024-12-13T00:58:31,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742124_1300 (size=216) 2024-12-13T00:58:31,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742125_1301 (size=670) 2024-12-13T00:58:31,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742124_1300 (size=216) 2024-12-13T00:58:31,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742125_1301 (size=670) 2024-12-13T00:58:31,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742125_1301 (size=670) 2024-12-13T00:58:31,018 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:31,019 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:31,019 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:31,019 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:31,948 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:58:32,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-10107778129158868335.jar 2024-12-13T00:58:32,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-15525669215853213864.jar 2024-12-13T00:58:32,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:58:32,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:58:32,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:58:32,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:58:32,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:58:32,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:58:32,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:58:32,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:58:32,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:58:32,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:58:32,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:58:32,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:58:32,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:58:32,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:32,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:32,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:32,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:32,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:58:32,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:32,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:58:32,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742126_1302 (size=127628) 2024-12-13T00:58:32,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742126_1302 (size=127628) 2024-12-13T00:58:32,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742126_1302 (size=127628) 2024-12-13T00:58:32,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T00:58:32,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T00:58:32,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742127_1303 (size=2172137) 2024-12-13T00:58:32,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742128_1304 (size=213228) 2024-12-13T00:58:32,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742128_1304 (size=213228) 2024-12-13T00:58:32,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742128_1304 (size=213228) 2024-12-13T00:58:32,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T00:58:32,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T00:58:32,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39849 is added to blk_1073742129_1305 (size=1877034) 2024-12-13T00:58:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742130_1306 (size=533455) 2024-12-13T00:58:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742130_1306 (size=533455) 2024-12-13T00:58:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742130_1306 (size=533455) 2024-12-13T00:58:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T00:58:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T00:58:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742131_1307 (size=7280644) 2024-12-13T00:58:32,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T00:58:32,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T00:58:32,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742132_1308 (size=4188619) 2024-12-13T00:58:32,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742133_1309 (size=20406) 2024-12-13T00:58:32,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742133_1309 (size=20406) 2024-12-13T00:58:32,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742133_1309 (size=20406) 2024-12-13T00:58:32,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742134_1310 (size=75495) 2024-12-13T00:58:32,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742134_1310 (size=75495) 2024-12-13T00:58:32,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742134_1310 (size=75495) 2024-12-13T00:58:32,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742135_1311 (size=45609) 2024-12-13T00:58:32,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742135_1311 (size=45609) 2024-12-13T00:58:32,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742135_1311 (size=45609) 2024-12-13T00:58:32,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742136_1312 (size=110084) 2024-12-13T00:58:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742136_1312 (size=110084) 2024-12-13T00:58:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742136_1312 (size=110084) 2024-12-13T00:58:32,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742137_1313 (size=1323991) 2024-12-13T00:58:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742137_1313 (size=1323991) 2024-12-13T00:58:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742137_1313 (size=1323991) 2024-12-13T00:58:32,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742138_1314 (size=23076) 2024-12-13T00:58:32,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742138_1314 (size=23076) 2024-12-13T00:58:32,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742138_1314 (size=23076) 2024-12-13T00:58:32,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742139_1315 (size=126803) 2024-12-13T00:58:32,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742139_1315 (size=126803) 2024-12-13T00:58:32,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742139_1315 (size=126803) 2024-12-13T00:58:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742140_1316 (size=322274) 2024-12-13T00:58:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742140_1316 (size=322274) 2024-12-13T00:58:32,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742140_1316 (size=322274) 2024-12-13T00:58:32,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742141_1317 (size=6350924) 2024-12-13T00:58:32,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742141_1317 (size=6350924) 2024-12-13T00:58:32,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742141_1317 (size=6350924) 2024-12-13T00:58:32,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742142_1318 (size=1832290) 2024-12-13T00:58:32,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742142_1318 (size=1832290) 2024-12-13T00:58:32,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742142_1318 (size=1832290) 2024-12-13T00:58:32,550 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742143_1319 (size=30081) 2024-12-13T00:58:32,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742143_1319 (size=30081) 2024-12-13T00:58:32,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742143_1319 (size=30081) 2024-12-13T00:58:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742144_1320 (size=53616) 2024-12-13T00:58:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742144_1320 (size=53616) 2024-12-13T00:58:32,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742144_1320 (size=53616) 2024-12-13T00:58:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742145_1321 (size=29229) 2024-12-13T00:58:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742145_1321 (size=29229) 2024-12-13T00:58:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742145_1321 (size=29229) 2024-12-13T00:58:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742146_1322 (size=169089) 2024-12-13T00:58:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742146_1322 (size=169089) 2024-12-13T00:58:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742146_1322 (size=169089) 2024-12-13T00:58:32,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742147_1323 (size=5175431) 2024-12-13T00:58:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742147_1323 (size=5175431) 2024-12-13T00:58:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742147_1323 (size=5175431) 2024-12-13T00:58:32,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742148_1324 (size=451756) 2024-12-13T00:58:32,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742148_1324 (size=451756) 2024-12-13T00:58:32,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742148_1324 (size=451756) 2024-12-13T00:58:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742149_1325 (size=136454) 2024-12-13T00:58:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742149_1325 (size=136454) 2024-12-13T00:58:32,617 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742149_1325 (size=136454) 2024-12-13T00:58:32,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742150_1326 (size=907855) 2024-12-13T00:58:32,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742150_1326 (size=907855) 2024-12-13T00:58:32,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742150_1326 (size=907855) 2024-12-13T00:58:32,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T00:58:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T00:58:32,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742151_1327 (size=3317408) 2024-12-13T00:58:32,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742152_1328 (size=503880) 2024-12-13T00:58:32,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742152_1328 (size=503880) 2024-12-13T00:58:32,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742152_1328 (size=503880) 2024-12-13T00:58:32,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T00:58:32,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T00:58:32,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742153_1329 (size=4695811) 2024-12-13T00:58:32,694 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
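The TableMapReduceUtil entries above show the dependency jars being staged for the MapReduce job that ExportSnapshot submits next. As a hedged sketch of how such an export can be driven programmatically (the test harness wires this up internally; the class name and exit handling here are illustrative), the tool can be run through Hadoop's ToolRunner with the snapshot name and target directory seen in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Illustrative sketch, assuming the cluster configuration is on the classpath.
public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:46775/user/jenkins/test-data/"
            + "6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944"
    });
    System.exit(exitCode);
  }
}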
2024-12-13T00:58:32,697 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-13T00:58:32,700 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-13T00:58:32,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742154_1330 (size=378) 2024-12-13T00:58:32,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742154_1330 (size=378) 2024-12-13T00:58:32,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742154_1330 (size=378) 2024-12-13T00:58:32,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742155_1331 (size=15) 2024-12-13T00:58:32,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742155_1331 (size=15) 2024-12-13T00:58:32,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742155_1331 (size=15) 2024-12-13T00:58:32,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742156_1332 (size=304944) 2024-12-13T00:58:32,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742156_1332 (size=304944) 2024-12-13T00:58:32,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742156_1332 (size=304944) 2024-12-13T00:58:32,757 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:58:32,757 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:58:33,125 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0006_000001 (auth:SIMPLE) from 127.0.0.1:45604 2024-12-13T00:58:34,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:34,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-13T00:58:34,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:34,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-13T00:58:34,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-13T00:58:39,518 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0006_000001 (auth:SIMPLE) from 127.0.0.1:52890 2024-12-13T00:58:39,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742157_1333 (size=350618) 2024-12-13T00:58:39,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742157_1333 (size=350618) 2024-12-13T00:58:39,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742157_1333 (size=350618) 2024-12-13T00:58:40,034 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:58:41,875 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0006_000001 (auth:SIMPLE) from 127.0.0.1:45610 2024-12-13T00:58:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742158_1334 (size=4945) 2024-12-13T00:58:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742158_1334 (size=4945) 2024-12-13T00:58:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742158_1334 (size=4945) 2024-12-13T00:58:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742159_1335 (size=4945) 2024-12-13T00:58:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742159_1335 (size=4945) 2024-12-13T00:58:46,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40551 is added to blk_1073742159_1335 (size=4945) 2024-12-13T00:58:47,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742160_1336 (size=17474) 2024-12-13T00:58:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742160_1336 (size=17474) 2024-12-13T00:58:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742160_1336 (size=17474) 2024-12-13T00:58:47,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742161_1337 (size=482) 2024-12-13T00:58:47,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742161_1337 (size=482) 2024-12-13T00:58:47,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742161_1337 (size=482) 2024-12-13T00:58:47,160 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000002/launch_container.sh] 2024-12-13T00:58:47,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742162_1338 (size=17474) 2024-12-13T00:58:47,160 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000002/container_tokens] 2024-12-13T00:58:47,160 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000002/sysfs] 2024-12-13T00:58:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742162_1338 (size=17474) 2024-12-13T00:58:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742162_1338 (size=17474) 2024-12-13T00:58:47,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742163_1339 (size=350618) 2024-12-13T00:58:47,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742163_1339 (size=350618) 2024-12-13T00:58:47,196 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742163_1339 (size=350618) 2024-12-13T00:58:47,217 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0006_000001 (auth:SIMPLE) from 127.0.0.1:60656 2024-12-13T00:58:49,145 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:58:49,147 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T00:58:49,153 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,153 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:58:49,154 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:58:49,154 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,155 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-13T00:58:49,155 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-13T00:58:49,155 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,155 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-13T00:58:49,155 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051510944/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-13T00:58:49,161 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,162 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T00:58:49,165 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051529165"}]},"ts":"1734051529165"} 2024-12-13T00:58:49,167 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-13T00:58:49,169 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-13T00:58:49,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-13T00:58:49,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, UNASSIGN}] 2024-12-13T00:58:49,172 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, UNASSIGN 2024-12-13T00:58:49,173 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=b0972bb6f077a6b1eb2abc41db866af8, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:49,174 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:49,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure b0972bb6f077a6b1eb2abc41db866af8, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:49,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T00:58:49,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:49,326 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:49,327 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:58:49,327 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing b0972bb6f077a6b1eb2abc41db866af8, disabling compactions & flushes 2024-12-13T00:58:49,327 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region 
testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:49,327 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:49,327 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. after waiting 0 ms 2024-12-13T00:58:49,327 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 2024-12-13T00:58:49,336 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-13T00:58:49,336 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:49,337 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8. 
2024-12-13T00:58:49,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for b0972bb6f077a6b1eb2abc41db866af8: 2024-12-13T00:58:49,338 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:49,339 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=b0972bb6f077a6b1eb2abc41db866af8, regionState=CLOSED 2024-12-13T00:58:49,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-13T00:58:49,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure b0972bb6f077a6b1eb2abc41db866af8, server=9812beb1e4cd,45231,1734051384968 in 166 msec 2024-12-13T00:58:49,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-13T00:58:49,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b0972bb6f077a6b1eb2abc41db866af8, UNASSIGN in 170 msec 2024-12-13T00:58:49,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-13T00:58:49,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 173 msec 2024-12-13T00:58:49,345 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051529345"}]},"ts":"1734051529345"} 2024-12-13T00:58:49,347 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-13T00:58:49,349 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-13T00:58:49,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 188 msec 2024-12-13T00:58:49,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-13T00:58:49,467 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-13T00:58:49,468 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,469 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,470 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,471 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,473 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:49,473 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:49,473 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:49,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,475 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/recovered.edits] 2024-12-13T00:58:49,475 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/recovered.edits] 2024-12-13T00:58:49,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T00:58:49,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T00:58:49,476 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/recovered.edits] 2024-12-13T00:58:49,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T00:58:49,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-13T00:58:49,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,478 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:49,478 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:49,478 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-13T00:58:49,478 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:49,483 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/cf/b11692776f4e4658ba6af97c200de1f8 2024-12-13T00:58:49,484 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/cf/cfc711eac74642148e2be077e25fe20f 2024-12-13T00:58:49,484 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/b11692776f4e4658ba6af97c200de1f8.46bb93906bd0c3ead0ff86e35c5ea599 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/b11692776f4e4658ba6af97c200de1f8.46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:49,485 DEBUG 
[HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/cfc711eac74642148e2be077e25fe20f.276a185bd9b5f92320d03779894725c3 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/cf/cfc711eac74642148e2be077e25fe20f.276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:49,488 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/recovered.edits/8.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599/recovered.edits/8.seqid 2024-12-13T00:58:49,488 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/46bb93906bd0c3ead0ff86e35c5ea599 2024-12-13T00:58:49,488 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/recovered.edits/8.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3/recovered.edits/8.seqid 2024-12-13T00:58:49,489 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/276a185bd9b5f92320d03779894725c3 2024-12-13T00:58:49,489 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/recovered.edits/12.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8/recovered.edits/12.seqid 2024-12-13T00:58:49,490 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b0972bb6f077a6b1eb2abc41db866af8 2024-12-13T00:58:49,490 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-13T00:58:49,493 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,496 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 
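The HFileArchiver entries above show each store file and recovered.edits file being moved from the table's data directory to a parallel location under archive/ before the region directory is deleted. Below is a minimal, illustrative sketch of that path mapping only; the helper class and method names are hypothetical, and this is not the actual org.apache.hadoop.hbase.backup.HFileArchiver implementation.

import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only. It mirrors the mapping visible in
// the log lines above: a file under <rootDir>/data/<ns>/<table>/<region>/...
// is relocated to <rootDir>/archive/data/<ns>/<table>/<region>/... with the
// same relative path preserved.
final class ArchivePathSketch {
  static Path toArchivePath(Path rootDir, Path fileUnderData) {
    String root = rootDir.toUri().getPath();              // e.g. /user/jenkins/test-data/<id>
    String full = fileUnderData.toUri().getPath();        // e.g. <root>/data/default/<table>/<region>/cf/<hfile>
    String relative = full.substring(root.length() + 1);  // data/default/<table>/<region>/cf/<hfile>
    return new Path(new Path(rootDir, "archive"), relative);
  }
}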
2024-12-13T00:58:49,498 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-13T00:58:49,499 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,499 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-13T00:58:49,499 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051529499"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:49,501 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-13T00:58:49,501 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b0972bb6f077a6b1eb2abc41db866af8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8.', STARTKEY => '', ENDKEY => ''}] 2024-12-13T00:58:49,501 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-13T00:58:49,501 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051529501"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:49,503 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-13T00:58:49,505 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:49,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 37 msec 2024-12-13T00:58:49,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-13T00:58:49,580 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-13T00:58:49,580 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,581 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T00:58:49,583 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051529583"}]},"ts":"1734051529583"} 2024-12-13T00:58:49,584 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-13T00:58:49,593 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-13T00:58:49,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-13T00:58:49,595 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, UNASSIGN}] 2024-12-13T00:58:49,595 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, UNASSIGN 2024-12-13T00:58:49,596 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, UNASSIGN 2024-12-13T00:58:49,596 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=e6cf1838296a4d970f499ee9f19b4ba1, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:49,596 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7291031b7331870285a02ea61c6bc826, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:49,598 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:49,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:49,598 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:58:49,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure 7291031b7331870285a02ea61c6bc826, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:58:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T00:58:49,749 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:49,750 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] 
handler.UnassignRegionHandler(124): Close e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:49,750 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:58:49,750 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing e6cf1838296a4d970f499ee9f19b4ba1, disabling compactions & flushes 2024-12-13T00:58:49,750 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:49,750 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:49,750 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. after waiting 0 ms 2024-12-13T00:58:49,750 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:49,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:49,751 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:49,751 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:58:49,751 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 7291031b7331870285a02ea61c6bc826, disabling compactions & flushes 2024-12-13T00:58:49,751 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:49,751 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 2024-12-13T00:58:49,751 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. after waiting 0 ms 2024-12-13T00:58:49,751 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
2024-12-13T00:58:49,757 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:58:49,757 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:49,758 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1. 2024-12-13T00:58:49,758 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for e6cf1838296a4d970f499ee9f19b4ba1: 2024-12-13T00:58:49,760 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:58:49,760 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:49,761 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=e6cf1838296a4d970f499ee9f19b4ba1, regionState=CLOSED 2024-12-13T00:58:49,762 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:58:49,762 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826. 
2024-12-13T00:58:49,762 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 7291031b7331870285a02ea61c6bc826: 2024-12-13T00:58:49,764 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:49,765 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7291031b7331870285a02ea61c6bc826, regionState=CLOSED 2024-12-13T00:58:49,765 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-13T00:58:49,765 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure e6cf1838296a4d970f499ee9f19b4ba1, server=9812beb1e4cd,45231,1734051384968 in 165 msec 2024-12-13T00:58:49,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=e6cf1838296a4d970f499ee9f19b4ba1, UNASSIGN in 170 msec 2024-12-13T00:58:49,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131 2024-12-13T00:58:49,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure 7291031b7331870285a02ea61c6bc826, server=9812beb1e4cd,46075,1734051384875 in 168 msec 2024-12-13T00:58:49,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-13T00:58:49,770 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7291031b7331870285a02ea61c6bc826, UNASSIGN in 173 msec 2024-12-13T00:58:49,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-13T00:58:49,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 177 msec 2024-12-13T00:58:49,773 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051529773"}]},"ts":"1734051529773"} 2024-12-13T00:58:49,774 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-13T00:58:49,776 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-13T00:58:49,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 195 msec 2024-12-13T00:58:49,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-13T00:58:49,889 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-13T00:58:49,890 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,892 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,894 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,898 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:49,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,899 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:49,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T00:58:49,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T00:58:49,900 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T00:58:49,901 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-13T00:58:49,903 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/recovered.edits] 2024-12-13T00:58:49,903 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/recovered.edits] 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:49,912 
DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/cf/a07ef0a7d7794f12b3f3d8017dcab06e 2024-12-13T00:58:49,912 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/cf/428ce3456fea4d7aa7066ee5ca32483d 2024-12-13T00:58:49,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-13T00:58:49,918 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826/recovered.edits/9.seqid 2024-12-13T00:58:49,918 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1/recovered.edits/9.seqid 2024-12-13T00:58:49,919 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/7291031b7331870285a02ea61c6bc826 2024-12-13T00:58:49,919 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithMergeRegion/e6cf1838296a4d970f499ee9f19b4ba1 2024-12-13T00:58:49,919 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-13T00:58:49,922 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,926 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-13T00:58:49,941 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 
2024-12-13T00:58:49,943 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,943 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-13T00:58:49,943 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051529943"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:49,943 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051529943"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:49,945 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:58:49,945 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7291031b7331870285a02ea61c6bc826, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734051506790.7291031b7331870285a02ea61c6bc826.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e6cf1838296a4d970f499ee9f19b4ba1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734051506790.e6cf1838296a4d970f499ee9f19b4ba1.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:58:49,945 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
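At this point the log has traced a client-requested DISABLE (pid=129) and DELETE (pid=135) of testtb-testExportFileSystemStateWithMergeRegion, executed on the master as DisableTableProcedure and DeleteTableProcedure. For reference, here is a minimal client-side sketch of the same disable-then-delete sequence using the standard HBase Admin API; the configuration source and class name are placeholders, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch only: drops a table the same way the test driver does
// (disable first, then delete), letting the master run the corresponding
// DisableTableProcedure and DeleteTableProcedure seen in the log above.
public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();     // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table);  // regions are unassigned via CloseRegionProcedure
        }
        admin.deleteTable(table);     // region files are archived, then META rows removed
      }
    }
  }
}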
2024-12-13T00:58:49,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051529946"}]},"ts":"9223372036854775807"} 2024-12-13T00:58:49,947 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-13T00:58:49,950 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:49,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 60 msec 2024-12-13T00:58:50,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-13T00:58:50,015 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-13T00:58:50,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-13T00:58:50,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:50,031 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-13T00:58:50,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-13T00:58:50,034 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-13T00:58:50,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:50,057 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=793 (was 784) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4714 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-576565181_1 at /127.0.0.1:55654 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43323 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-576565181_1 at /127.0.0.1:54584 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3733) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:43323 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:55674 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9812beb1e4cd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:47142 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:54592 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=775 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=699 (was 698) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 20), AvailableMemoryMB=3670 (was 3881) 2024-12-13T00:58:50,057 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-12-13T00:58:50,084 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=793, OpenFileDescriptor=775, MaxFileDescriptor=1048576, SystemLoadAverage=699, ProcessCount=20, AvailableMemoryMB=3672 2024-12-13T00:58:50,084 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-12-13T00:58:50,087 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:58:50,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:58:50,090 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:58:50,090 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:50,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-13T00:58:50,091 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:58:50,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T00:58:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to 
blk_1073742164_1340 (size=407) 2024-12-13T00:58:50,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742164_1340 (size=407) 2024-12-13T00:58:50,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742164_1340 (size=407) 2024-12-13T00:58:50,125 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 812185a7a28ca83402d75fc2c5894633, NAME => 'testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:50,128 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 350758ce3bbce2773a2d63eb3ca1787b, NAME => 'testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:50,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742165_1341 (size=68) 2024-12-13T00:58:50,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742165_1341 (size=68) 2024-12-13T00:58:50,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742165_1341 (size=68) 2024-12-13T00:58:50,145 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:50,146 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 350758ce3bbce2773a2d63eb3ca1787b, disabling compactions & flushes 2024-12-13T00:58:50,146 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
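The CreateTableProcedure running above (pid=136) is acting on the descriptor that HMaster logged: a single column family 'cf' with VERSIONS => '1' and BLOOMFILTER => 'ROW', REGION_REPLICATION => '1', and one split at key '1', which is why the RegionOpenAndInit pool initializes exactly two regions (STARTKEY ''..'1' and '1'..''). As a point of reference only, a minimal client-side sketch that would request an equivalent table through the standard HBase 2.x Admin API looks roughly like the following; the class name and connection setup are assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportExpiredSnapshot");
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                             // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)             // BLOOMFILTER => 'ROW'
          .build();
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)                       // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      byte[][] splitKeys = { Bytes.toBytes("1") };       // yields the two regions ('', '1') and ('1', '')
      admin.createTable(desc, splitKeys);
    }
  }
}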
2024-12-13T00:58:50,146 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,146 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. after waiting 0 ms 2024-12-13T00:58:50,146 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,146 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,146 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 350758ce3bbce2773a2d63eb3ca1787b: 2024-12-13T00:58:50,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742166_1342 (size=68) 2024-12-13T00:58:50,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742166_1342 (size=68) 2024-12-13T00:58:50,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742166_1342 (size=68) 2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 812185a7a28ca83402d75fc2c5894633, disabling compactions & flushes 2024-12-13T00:58:50,156 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. after waiting 0 ms 2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,156 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 
2024-12-13T00:58:50,156 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 812185a7a28ca83402d75fc2c5894633: 2024-12-13T00:58:50,159 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:58:50,160 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734051530159"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051530159"}]},"ts":"1734051530159"} 2024-12-13T00:58:50,160 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734051530159"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051530159"}]},"ts":"1734051530159"} 2024-12-13T00:58:50,162 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:58:50,162 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:58:50,162 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051530162"}]},"ts":"1734051530162"} 2024-12-13T00:58:50,163 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-13T00:58:50,167 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:58:50,168 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:58:50,168 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:58:50,168 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:58:50,168 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:58:50,168 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:58:50,168 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:58:50,168 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:58:50,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, ASSIGN}] 2024-12-13T00:58:50,169 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, ASSIGN 2024-12-13T00:58:50,170 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, ASSIGN 2024-12-13T00:58:50,170 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:58:50,170 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:58:50,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T00:58:50,321 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:58:50,321 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=350758ce3bbce2773a2d63eb3ca1787b, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:50,321 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=812185a7a28ca83402d75fc2c5894633, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:50,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; OpenRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:58:50,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=137, state=RUNNABLE; OpenRegionProcedure 812185a7a28ca83402d75fc2c5894633, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:58:50,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T00:58:50,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:50,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:50,479 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
2024-12-13T00:58:50,479 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => 350758ce3bbce2773a2d63eb3ca1787b, NAME => 'testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:58:50,479 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. service=AccessControlService 2024-12-13T00:58:50,480 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:58:50,480 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,480 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:50,480 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,480 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,482 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,482 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => 812185a7a28ca83402d75fc2c5894633, NAME => 'testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:58:50,482 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. service=AccessControlService 2024-12-13T00:58:50,483 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
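The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines show that this secure mini-cluster runs the AccessController coprocessor on every region; the same component later writes the "jenkins: RWXCA" ACL entry seen further down. A rough sketch of the configuration that enables it is below; the values are illustrative assumptions, and the real secure test setup additionally wires up Kerberos authentication and HDFS security, which are omitted here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfigSketch {
  // Returns a Configuration with HBase authorization and the AccessController
  // coprocessor enabled. Illustrative only; authentication settings are omitted.
  public static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes", ac);        // ACL checks for DDL such as the create above
    conf.set("hbase.coprocessor.region.classes", ac);        // registers AccessControlService on each region
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}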
2024-12-13T00:58:50,483 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,483 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:50,483 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,483 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,483 INFO [StoreOpener-350758ce3bbce2773a2d63eb3ca1787b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,485 INFO [StoreOpener-812185a7a28ca83402d75fc2c5894633-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,487 INFO [StoreOpener-350758ce3bbce2773a2d63eb3ca1787b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 350758ce3bbce2773a2d63eb3ca1787b columnFamilyName cf 2024-12-13T00:58:50,487 DEBUG [StoreOpener-350758ce3bbce2773a2d63eb3ca1787b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:50,488 INFO [StoreOpener-350758ce3bbce2773a2d63eb3ca1787b-1 {}] regionserver.HStore(327): Store=350758ce3bbce2773a2d63eb3ca1787b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:50,488 INFO [StoreOpener-812185a7a28ca83402d75fc2c5894633-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 812185a7a28ca83402d75fc2c5894633 columnFamilyName cf 2024-12-13T00:58:50,488 DEBUG [StoreOpener-812185a7a28ca83402d75fc2c5894633-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:50,489 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,489 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,489 INFO [StoreOpener-812185a7a28ca83402d75fc2c5894633-1 {}] regionserver.HStore(327): Store=812185a7a28ca83402d75fc2c5894633/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:50,491 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,491 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,499 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,503 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,503 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:50,504 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 350758ce3bbce2773a2d63eb3ca1787b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69994381, jitterRate=0.04299755394458771}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:50,505 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] 
regionserver.HRegion(1001): Region open journal for 350758ce3bbce2773a2d63eb3ca1787b: 2024-12-13T00:58:50,507 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b., pid=139, masterSystemTime=1734051530475 2024-12-13T00:58:50,507 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:50,507 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened 812185a7a28ca83402d75fc2c5894633; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74856211, jitterRate=0.11544446647167206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:50,508 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for 812185a7a28ca83402d75fc2c5894633: 2024-12-13T00:58:50,509 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633., pid=140, masterSystemTime=1734051530477 2024-12-13T00:58:50,511 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,511 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,511 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=350758ce3bbce2773a2d63eb3ca1787b, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:50,512 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,512 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 
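At this point both OpenRegionProcedures (pid=139 and pid=140) have opened their regions and reported back, so the table's two regions are online on ports 46075 and 45231. From a client, the same outcome can be confirmed with the RegionLocator API; the sketch below takes only the table name from the log, and everything else (class name, connection) is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckRegionsOnline {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(name)) {
      // Two locations are expected: one region per side of the split at '1'.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}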
2024-12-13T00:58:50,513 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=812185a7a28ca83402d75fc2c5894633, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-13T00:58:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; OpenRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b, server=9812beb1e4cd,46075,1734051384875 in 191 msec 2024-12-13T00:58:50,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=137 2024-12-13T00:58:50,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=137, state=SUCCESS; OpenRegionProcedure 812185a7a28ca83402d75fc2c5894633, server=9812beb1e4cd,45231,1734051384968 in 191 msec 2024-12-13T00:58:50,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, ASSIGN in 348 msec 2024-12-13T00:58:50,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-13T00:58:50,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, ASSIGN in 349 msec 2024-12-13T00:58:50,520 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:58:50,520 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051530520"}]},"ts":"1734051530520"} 2024-12-13T00:58:50,522 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-13T00:58:50,525 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:58:50,525 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-13T00:58:50,527 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T00:58:50,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:50,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:50,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:50,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:50,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:50,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:50,531 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:50,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:50,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 445 msec 2024-12-13T00:58:50,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-13T00:58:50,695 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-13T00:58:50,695 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-13T00:58:50,695 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:50,699 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-13T00:58:50,699 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:50,699 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-13T00:58:50,702 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T00:58:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051530703 (current time:1734051530703). 
2024-12-13T00:58:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:58:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T00:58:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d6e12d6 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@dc9e955 2024-12-13T00:58:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e4e0bc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:50,715 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d6e12d6 to 127.0.0.1:49424 2024-12-13T00:58:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x205f4160 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@629e877f 2024-12-13T00:58:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c10b0c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:50,725 DEBUG [hconnection-0x51622c26-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:50,726 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x205f4160 to 127.0.0.1:49424 2024-12-13T00:58:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 
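The request being handled here, { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, is an ordinary flush-type snapshot taken while the table is still empty. On the client side such a request is a single Admin call; a minimal sketch follows, with the class name and connection setup assumed rather than taken from the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this produces a flush-type snapshot, matching the
      // "type=FLUSH ttl=0" shown in the master log above.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}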
2024-12-13T00:58:50,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:58:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T00:58:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-13T00:58:50,732 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T00:58:50,733 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:50,735 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:50,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742167_1343 (size=170) 2024-12-13T00:58:50,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742167_1343 (size=170) 2024-12-13T00:58:50,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742167_1343 (size=170) 2024-12-13T00:58:50,761 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:50,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633}, {pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b}] 2024-12-13T00:58:50,762 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,762 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, 
state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T00:58:50,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:50,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:50,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-13T00:58:50,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for 812185a7a28ca83402d75fc2c5894633: 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 350758ce3bbce2773a2d63eb3ca1787b: 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:58:50,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:58:50,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742169_1345 (size=71) 2024-12-13T00:58:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742169_1345 (size=71) 2024-12-13T00:58:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742169_1345 (size=71) 2024-12-13T00:58:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:58:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-13T00:58:50,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-13T00:58:50,934 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:50,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b in 174 msec 2024-12-13T00:58:50,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742168_1344 (size=71) 2024-12-13T00:58:50,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742168_1344 (size=71) 2024-12-13T00:58:50,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742168_1344 (size=71) 2024-12-13T00:58:50,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 
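Once the per-region SnapshotRegionProcedures finish and the master consolidates and verifies the manifest (the SNAPSHOT_CONSOLIDATE_SNAPSHOT and SNAPSHOT_VERIFIER_SNAPSHOT states that follow), the completed snapshot becomes visible to clients. A quick way to confirm that from the Admin API is sketched below, under the same assumptions as the earlier snippets.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Expect emptySnaptb0-testExportExpiredSnapshot (and, later in the test run,
      // snaptb0-testExportExpiredSnapshot) among the results.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on table " + sd.getTableName());
      }
    }
  }
}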
2024-12-13T00:58:50,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-13T00:58:50,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-13T00:58:50,940 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,940 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:50,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-13T00:58:50,943 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:50,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 in 180 msec 2024-12-13T00:58:50,943 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:50,944 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:50,944 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:58:50,944 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:58:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742170_1346 (size=552) 2024-12-13T00:58:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742170_1346 (size=552) 2024-12-13T00:58:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742170_1346 (size=552) 2024-12-13T00:58:51,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T00:58:51,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T00:58:51,366 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:51,371 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:51,372 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:58:51,373 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:51,373 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-13T00:58:51,375 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 643 msec 2024-12-13T00:58:51,482 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-13T00:58:51,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-13T00:58:51,838 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-13T00:58:51,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:58:51,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:58:51,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-13T00:58:51,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 
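The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries above are produced when puts reach the region with write-ahead logging skipped. A minimal client-side sketch, assuming the durability is set per-mutation (it could equally come from the table's durability setting); the table name, family 'cf', qualifier 'q', and the row key (seen later in the flushed hfile) are taken from this log, the value and class name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Row key as it later appears in the flushed hfile; value is illustrative.
      Put put = new Put(Bytes.toBytes("0d0d5cd87ccb456cd8f85f832d20afa5"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log; the regionserver then logs
      // "writing data to region ... with WAL disabled".
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

Because the WAL is skipped, the data only becomes durable once the snapshot's flush (seen below) writes it into an hfile.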
2024-12-13T00:58:51,861 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:51,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T00:58:51,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051531881 (current time:1734051531881). 2024-12-13T00:58:51,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:58:51,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T00:58:51,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:51,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6430d586 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24f3d63a 2024-12-13T00:58:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e6c5cf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:51,896 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:51,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6430d586 to 127.0.0.1:49424 2024-12-13T00:58:51,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:51,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4eed7cc6 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74153127 2024-12-13T00:58:51,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58b258ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:51,904 DEBUG [hconnection-0x6c1469a4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:51,905 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45614, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4eed7cc6 to 127.0.0.1:49424 2024-12-13T00:58:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T00:58:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:58:51,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-13T00:58:51,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-13T00:58:51,910 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:51,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T00:58:51,911 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:51,913 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:51,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742171_1347 (size=165) 2024-12-13T00:58:51,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742171_1347 (size=165) 2024-12-13T00:58:51,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742171_1347 (size=165) 2024-12-13T00:58:51,920 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:51,920 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b}] 2024-12-13T00:58:51,921 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:51,921 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:52,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T00:58:52,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:52,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:58:52,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-13T00:58:52,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-13T00:58:52,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:52,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
2024-12-13T00:58:52,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 812185a7a28ca83402d75fc2c5894633 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-13T00:58:52,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 350758ce3bbce2773a2d63eb3ca1787b 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-13T00:58:52,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/.tmp/cf/aba444dc0d274802b6124d3e294d41ee is 71, key is 0d0d5cd87ccb456cd8f85f832d20afa5/cf:q/1734051531847/Put/seqid=0 2024-12-13T00:58:52,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/.tmp/cf/dbc990e61d7a43e8b6260b0be4f8d51d is 71, key is 1e436e7e098233b88f361c3413cf5fb0/cf:q/1734051531850/Put/seqid=0 2024-12-13T00:58:52,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742172_1348 (size=5216) 2024-12-13T00:58:52,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742172_1348 (size=5216) 2024-12-13T00:58:52,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742172_1348 (size=5216) 2024-12-13T00:58:52,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/.tmp/cf/aba444dc0d274802b6124d3e294d41ee 2024-12-13T00:58:52,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/.tmp/cf/aba444dc0d274802b6124d3e294d41ee as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee 2024-12-13T00:58:52,123 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee, entries=2, sequenceid=6, filesize=5.1 K 2024-12-13T00:58:52,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of 
dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 812185a7a28ca83402d75fc2c5894633 in 50ms, sequenceid=6, compaction requested=false 2024-12-13T00:58:52,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 812185a7a28ca83402d75fc2c5894633: 2024-12-13T00:58:52,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. for snaptb0-testExportExpiredSnapshot completed. 2024-12-13T00:58:52,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:52,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee] hfiles 2024-12-13T00:58:52,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742173_1349 (size=8394) 2024-12-13T00:58:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742173_1349 (size=8394) 2024-12-13T00:58:52,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742173_1349 (size=8394) 2024-12-13T00:58:52,129 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/.tmp/cf/dbc990e61d7a43e8b6260b0be4f8d51d 2024-12-13T00:58:52,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742174_1350 (size=110) 2024-12-13T00:58:52,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742174_1350 (size=110) 2024-12-13T00:58:52,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742174_1350 (size=110) 2024-12-13T00:58:52,133 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:58:52,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-13T00:58:52,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-13T00:58:52,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:52,134 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:58:52,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/.tmp/cf/dbc990e61d7a43e8b6260b0be4f8d51d as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d 2024-12-13T00:58:52,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure 812185a7a28ca83402d75fc2c5894633 in 215 msec 2024-12-13T00:58:52,141 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d, entries=48, sequenceid=6, filesize=8.2 K 2024-12-13T00:58:52,142 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 350758ce3bbce2773a2d63eb3ca1787b in 67ms, sequenceid=6, compaction requested=false 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 350758ce3bbce2773a2d63eb3ca1787b: 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. for snaptb0-testExportExpiredSnapshot completed. 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d] hfiles 2024-12-13T00:58:52,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742175_1351 (size=110) 2024-12-13T00:58:52,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742175_1351 (size=110) 2024-12-13T00:58:52,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742175_1351 (size=110) 2024-12-13T00:58:52,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
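At this point both regions of testtb-testExportExpiredSnapshot have been flushed and their hfiles referenced into the manifest for snaptb0-testExportExpiredSnapshot. A minimal sketch of the client call that drives a FLUSH-type snapshot like the one requested at 00:58:51,881, assuming a standard Connection; the class name and boilerplate are illustrative, the snapshot and table names are from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: each online region is flushed first, then its
      // hfiles are referenced in the snapshot manifest (SnapshotRegionProcedure above).
      admin.snapshot("snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}

The call blocks until the SnapshotProcedure finishes, polling the master while it waits, which is what the repeated "Checking to see if procedure is done pid=144" entries reflect.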
2024-12-13T00:58:52,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-13T00:58:52,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-13T00:58:52,151 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:52,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:58:52,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=144 2024-12-13T00:58:52,154 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:52,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b in 232 msec 2024-12-13T00:58:52,154 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:52,155 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:52,155 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,155 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742176_1352 (size=630) 2024-12-13T00:58:52,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742176_1352 (size=630) 2024-12-13T00:58:52,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742176_1352 (size=630) 2024-12-13T00:58:52,171 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:52,180 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:52,180 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-13T00:58:52,182 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:52,182 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-13T00:58:52,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 274 msec 2024-12-13T00:58:52,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-13T00:58:52,212 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-13T00:58:52,214 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:58:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-13T00:58:52,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:58:52,216 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:52,216 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-13T00:58:52,221 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:58:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T00:58:52,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742177_1353 (size=400) 2024-12-13T00:58:52,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742177_1353 (size=400) 2024-12-13T00:58:52,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742177_1353 (size=400) 2024-12-13T00:58:52,257 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1fe5c0fc12b23be0acd40e1738aa9284, NAME => 'testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:52,257 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 44a9236bb87948b810c0985660aebd89, NAME => 'testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:58:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T00:58:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742178_1354 (size=61) 2024-12-13T00:58:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742178_1354 (size=61) 2024-12-13T00:58:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742178_1354 (size=61) 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now 
disable 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 1fe5c0fc12b23be0acd40e1738aa9284, disabling compactions & flushes 2024-12-13T00:58:52,328 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. after waiting 0 ms 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,328 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,328 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1fe5c0fc12b23be0acd40e1738aa9284: 2024-12-13T00:58:52,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742179_1355 (size=61) 2024-12-13T00:58:52,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742179_1355 (size=61) 2024-12-13T00:58:52,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742179_1355 (size=61) 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 44a9236bb87948b810c0985660aebd89, disabling compactions & flushes 2024-12-13T00:58:52,340 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. after waiting 0 ms 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 
2024-12-13T00:58:52,340 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,340 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 44a9236bb87948b810c0985660aebd89: 2024-12-13T00:58:52,346 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:58:52,347 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734051532346"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051532346"}]},"ts":"1734051532346"} 2024-12-13T00:58:52,347 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734051532346"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051532346"}]},"ts":"1734051532346"} 2024-12-13T00:58:52,349 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:58:52,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:58:52,350 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051532350"}]},"ts":"1734051532350"} 2024-12-13T00:58:52,352 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-13T00:58:52,356 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:58:52,357 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:58:52,357 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:58:52,357 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:58:52,357 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:58:52,357 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:58:52,357 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:58:52,357 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:58:52,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1fe5c0fc12b23be0acd40e1738aa9284, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=44a9236bb87948b810c0985660aebd89, ASSIGN}] 2024-12-13T00:58:52,359 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=44a9236bb87948b810c0985660aebd89, ASSIGN 2024-12-13T00:58:52,359 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1fe5c0fc12b23be0acd40e1738aa9284, ASSIGN 2024-12-13T00:58:52,360 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=44a9236bb87948b810c0985660aebd89, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:58:52,360 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1fe5c0fc12b23be0acd40e1738aa9284, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:58:52,510 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:58:52,510 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=1fe5c0fc12b23be0acd40e1738aa9284, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:52,510 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=44a9236bb87948b810c0985660aebd89, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:52,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:58:52,513 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE; OpenRegionProcedure 44a9236bb87948b810c0985660aebd89, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:58:52,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T00:58:52,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:52,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:52,667 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,667 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 
2024-12-13T00:58:52,667 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 1fe5c0fc12b23be0acd40e1738aa9284, NAME => 'testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:58:52,667 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 44a9236bb87948b810c0985660aebd89, NAME => 'testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. service=AccessControlService 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. service=AccessControlService 2024-12-13T00:58:52,668 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:58:52,668 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 
44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,668 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,670 INFO [StoreOpener-1fe5c0fc12b23be0acd40e1738aa9284-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,670 INFO [StoreOpener-44a9236bb87948b810c0985660aebd89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,671 INFO [StoreOpener-1fe5c0fc12b23be0acd40e1738aa9284-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1fe5c0fc12b23be0acd40e1738aa9284 columnFamilyName cf 2024-12-13T00:58:52,671 INFO [StoreOpener-44a9236bb87948b810c0985660aebd89-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44a9236bb87948b810c0985660aebd89 columnFamilyName cf 2024-12-13T00:58:52,671 DEBUG [StoreOpener-1fe5c0fc12b23be0acd40e1738aa9284-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:52,671 DEBUG [StoreOpener-44a9236bb87948b810c0985660aebd89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:58:52,671 INFO [StoreOpener-1fe5c0fc12b23be0acd40e1738aa9284-1 {}] regionserver.HStore(327): Store=1fe5c0fc12b23be0acd40e1738aa9284/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:52,671 INFO [StoreOpener-44a9236bb87948b810c0985660aebd89-1 {}] regionserver.HStore(327): Store=44a9236bb87948b810c0985660aebd89/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:58:52,672 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,672 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,672 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,673 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,674 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,674 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,676 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:52,676 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:58:52,677 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 44a9236bb87948b810c0985660aebd89; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65381973, jitterRate=-0.025732681155204773}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:52,677 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 1fe5c0fc12b23be0acd40e1738aa9284; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72046701, jitterRate=0.07357950508594513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:58:52,678 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 
44a9236bb87948b810c0985660aebd89: 2024-12-13T00:58:52,678 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 1fe5c0fc12b23be0acd40e1738aa9284: 2024-12-13T00:58:52,679 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89., pid=151, masterSystemTime=1734051532664 2024-12-13T00:58:52,679 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284., pid=150, masterSystemTime=1734051532663 2024-12-13T00:58:52,680 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,680 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:52,681 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=44a9236bb87948b810c0985660aebd89, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:52,681 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,681 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 
2024-12-13T00:58:52,682 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=1fe5c0fc12b23be0acd40e1738aa9284, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:52,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=149 2024-12-13T00:58:52,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=149, state=SUCCESS; OpenRegionProcedure 44a9236bb87948b810c0985660aebd89, server=9812beb1e4cd,46075,1734051384875 in 170 msec 2024-12-13T00:58:52,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148 2024-12-13T00:58:52,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=44a9236bb87948b810c0985660aebd89, ASSIGN in 326 msec 2024-12-13T00:58:52,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284, server=9812beb1e4cd,40687,1734051385054 in 171 msec 2024-12-13T00:58:52,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-13T00:58:52,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1fe5c0fc12b23be0acd40e1738aa9284, ASSIGN in 327 msec 2024-12-13T00:58:52,686 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:58:52,686 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051532686"}]},"ts":"1734051532686"} 2024-12-13T00:58:52,687 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-13T00:58:52,689 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:58:52,690 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-13T00:58:52,691 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T00:58:52,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:52,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:52,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:52,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:58:52,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:58:52,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 480 msec 2024-12-13T00:58:52,772 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T00:58:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-13T00:58:52,825 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-13T00:58:52,825 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-13T00:58:52,825 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:52,829 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-13T00:58:52,829 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:52,829 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-13T00:58:52,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:58:52,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:58:52,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-13T00:58:52,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:52,841 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:58:52,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-13T00:58:52,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-13T00:58:52,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:58:52,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ef9757 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40200758 2024-12-13T00:58:52,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295ace39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:52,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:52,856 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ef9757 to 127.0.0.1:49424 2024-12-13T00:58:52,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a309b6b to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@aea5fb2 2024-12-13T00:58:52,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75be73d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:58:52,863 DEBUG [hconnection-0x4d21a4e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:58:52,863 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:58:52,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a309b6b to 127.0.0.1:49424 2024-12-13T00:58:52,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:58:52,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-13T00:58:52,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-13T00:58:52,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-13T00:58:52,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-13T00:58:52,868 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:58:52,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T00:58:52,868 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:58:52,870 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:58:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742180_1356 (size=152) 2024-12-13T00:58:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742180_1356 (size=152) 2024-12-13T00:58:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742180_1356 (size=152) 2024-12-13T00:58:52,882 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:58:52,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 44a9236bb87948b810c0985660aebd89}] 2024-12-13T00:58:52,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:52,883 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:52,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-13T00:58:53,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:58:53,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:58:53,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-13T00:58:53,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-13T00:58:53,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T00:58:53,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:53,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 1fe5c0fc12b23be0acd40e1738aa9284 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T00:58:53,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 44a9236bb87948b810c0985660aebd89 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T00:58:53,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/.tmp/cf/ea75111250b447a08ac68af4c0b9fcc3 is 69, key is 03cce5c18a6d29c6fd323651769117fdf/cf:q/1734051532836/Put/seqid=0 2024-12-13T00:58:53,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/.tmp/cf/7b20baa9884b4a36b251414c246137a0 is 71, key is 11072f97bf9880a6d9e1539beaa7c2f2/cf:q/1734051532838/Put/seqid=0 2024-12-13T00:58:53,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742182_1358 (size=5149) 2024-12-13T00:58:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742181_1357 (size=8460) 2024-12-13T00:58:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742182_1358 (size=5149) 2024-12-13T00:58:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742181_1357 (size=8460) 2024-12-13T00:58:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42021 is added to blk_1073742182_1358 (size=5149) 2024-12-13T00:58:53,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742181_1357 (size=8460) 2024-12-13T00:58:53,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/.tmp/cf/ea75111250b447a08ac68af4c0b9fcc3 2024-12-13T00:58:53,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/.tmp/cf/7b20baa9884b4a36b251414c246137a0 2024-12-13T00:58:53,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/.tmp/cf/ea75111250b447a08ac68af4c0b9fcc3 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/cf/ea75111250b447a08ac68af4c0b9fcc3 2024-12-13T00:58:53,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/.tmp/cf/7b20baa9884b4a36b251414c246137a0 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/cf/7b20baa9884b4a36b251414c246137a0 2024-12-13T00:58:53,069 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/cf/ea75111250b447a08ac68af4c0b9fcc3, entries=1, sequenceid=5, filesize=5.0 K 2024-12-13T00:58:53,069 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/cf/7b20baa9884b4a36b251414c246137a0, entries=49, sequenceid=5, filesize=8.3 K 2024-12-13T00:58:53,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 1fe5c0fc12b23be0acd40e1738aa9284 in 36ms, sequenceid=5, compaction requested=false 2024-12-13T00:58:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new 
MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-13T00:58:53,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 44a9236bb87948b810c0985660aebd89 in 35ms, sequenceid=5, compaction requested=false 2024-12-13T00:58:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-13T00:58:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 44a9236bb87948b810c0985660aebd89: 2024-12-13T00:58:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 1fe5c0fc12b23be0acd40e1738aa9284: 2024-12-13T00:58:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. for snapshot-testExportExpiredSnapshot completed. 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. for snapshot-testExportExpiredSnapshot completed. 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/cf/7b20baa9884b4a36b251414c246137a0] hfiles 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/cf/ea75111250b447a08ac68af4c0b9fcc3] hfiles 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/cf/7b20baa9884b4a36b251414c246137a0 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/cf/ea75111250b447a08ac68af4c0b9fcc3 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742183_1359 (size=103) 2024-12-13T00:58:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742183_1359 (size=103) 2024-12-13T00:58:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742183_1359 (size=103) 2024-12-13T00:58:53,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742184_1360 (size=103) 2024-12-13T00:58:53,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 
2024-12-13T00:58:53,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-13T00:58:53,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742184_1360 (size=103) 2024-12-13T00:58:53,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742184_1360 (size=103) 2024-12-13T00:58:53,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T00:58:53,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-13T00:58:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-13T00:58:53,080 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-13T00:58:53,080 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:53,080 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T00:58:53,080 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 44a9236bb87948b810c0985660aebd89 2024-12-13T00:58:53,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 1fe5c0fc12b23be0acd40e1738aa9284 in 199 msec 2024-12-13T00:58:53,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-13T00:58:53,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 44a9236bb87948b810c0985660aebd89 in 199 msec 2024-12-13T00:58:53,083 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:58:53,084 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:58:53,084 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:58:53,084 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,085 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742185_1361 (size=609) 2024-12-13T00:58:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742185_1361 (size=609) 2024-12-13T00:58:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742185_1361 (size=609) 2024-12-13T00:58:53,104 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:58:53,110 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:58:53,110 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-13T00:58:53,111 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:58:53,112 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-13T00:58:53,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 246 msec 2024-12-13T00:58:53,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-13T00:58:53,170 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-13T00:58:53,288 INFO 
[Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0006_000001 (auth:SIMPLE) from 127.0.0.1:42084 2024-12-13T00:58:53,305 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000001/launch_container.sh] 2024-12-13T00:58:53,305 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000001/container_tokens] 2024-12-13T00:58:53,305 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0006/container_1734051393214_0006_01_000001/sysfs] 2024-12-13T00:58:54,873 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:58:54,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-13T00:58:54,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-13T00:58:54,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-13T00:58:54,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-13T00:58:54,900 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-13T00:58:54,900 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-13T00:59:00,402 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:59:03,178 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051543178 2024-12-13T00:59:03,178 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051543178, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051543178, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:03,207 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:03,207 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051543178, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051543178/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-13T00:59:03,210 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:59:03,211 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T00:59:03,212 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-13T00:59:03,215 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051543215"}]},"ts":"1734051543215"} 2024-12-13T00:59:03,216 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-13T00:59:03,218 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-13T00:59:03,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-13T00:59:03,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, UNASSIGN}] 2024-12-13T00:59:03,220 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, UNASSIGN 2024-12-13T00:59:03,221 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, UNASSIGN 2024-12-13T00:59:03,221 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=812185a7a28ca83402d75fc2c5894633, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:03,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=350758ce3bbce2773a2d63eb3ca1787b, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:03,222 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:59:03,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=157, state=RUNNABLE; CloseRegionProcedure 812185a7a28ca83402d75fc2c5894633, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:59:03,223 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-13T00:59:03,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; CloseRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:59:03,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-13T00:59:03,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:03,374 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:59:03,374 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:59:03,374 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 812185a7a28ca83402d75fc2c5894633, disabling compactions & flushes 2024-12-13T00:59:03,375 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. after waiting 0 ms 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:59:03,375 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:03,375 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing 350758ce3bbce2773a2d63eb3ca1787b, disabling compactions & flushes 2024-12-13T00:59:03,375 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. after waiting 0 ms 2024-12-13T00:59:03,375 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 2024-12-13T00:59:03,378 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:59:03,379 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:59:03,379 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:59:03,379 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:59:03,379 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633. 2024-12-13T00:59:03,379 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b. 
2024-12-13T00:59:03,379 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for 350758ce3bbce2773a2d63eb3ca1787b: 2024-12-13T00:59:03,379 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 812185a7a28ca83402d75fc2c5894633: 2024-12-13T00:59:03,381 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed 350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:59:03,381 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=350758ce3bbce2773a2d63eb3ca1787b, regionState=CLOSED 2024-12-13T00:59:03,381 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 812185a7a28ca83402d75fc2c5894633 2024-12-13T00:59:03,382 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=812185a7a28ca83402d75fc2c5894633, regionState=CLOSED 2024-12-13T00:59:03,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-12-13T00:59:03,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; CloseRegionProcedure 350758ce3bbce2773a2d63eb3ca1787b, server=9812beb1e4cd,46075,1734051384875 in 159 msec 2024-12-13T00:59:03,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=157 2024-12-13T00:59:03,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=157, state=SUCCESS; CloseRegionProcedure 812185a7a28ca83402d75fc2c5894633, server=9812beb1e4cd,45231,1734051384968 in 161 msec 2024-12-13T00:59:03,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=350758ce3bbce2773a2d63eb3ca1787b, UNASSIGN in 164 msec 2024-12-13T00:59:03,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-13T00:59:03,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=812185a7a28ca83402d75fc2c5894633, UNASSIGN in 164 msec 2024-12-13T00:59:03,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-13T00:59:03,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 167 msec 2024-12-13T00:59:03,389 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051543388"}]},"ts":"1734051543388"} 2024-12-13T00:59:03,389 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-13T00:59:03,391 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-13T00:59:03,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure 
table=testtb-testExportExpiredSnapshot in 179 msec 2024-12-13T00:59:03,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-13T00:59:03,517 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-13T00:59:03,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,519 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,520 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,521 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,523 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:59:03,523 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633 2024-12-13T00:59:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 
2024-12-13T00:59:03,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T00:59:03,525 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/recovered.edits] 2024-12-13T00:59:03,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T00:59:03,525 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/recovered.edits] 2024-12-13T00:59:03,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-13T00:59:03,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-13T00:59:03,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:03,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:03,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:03,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:03,530 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/cf/aba444dc0d274802b6124d3e294d41ee 2024-12-13T00:59:03,531 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/cf/dbc990e61d7a43e8b6260b0be4f8d51d 2024-12-13T00:59:03,534 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b/recovered.edits/9.seqid 2024-12-13T00:59:03,534 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633/recovered.edits/9.seqid 2024-12-13T00:59:03,534 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/350758ce3bbce2773a2d63eb3ca1787b 2024-12-13T00:59:03,534 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportExpiredSnapshot/812185a7a28ca83402d75fc2c5894633 2024-12-13T00:59:03,534 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-13T00:59:03,536 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,538 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-13T00:59:03,541 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-13T00:59:03,542 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,542 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-13T00:59:03,542 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051543542"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:03,542 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051543542"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:03,544 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:59:03,544 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 812185a7a28ca83402d75fc2c5894633, NAME => 'testtb-testExportExpiredSnapshot,,1734051530087.812185a7a28ca83402d75fc2c5894633.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 350758ce3bbce2773a2d63eb3ca1787b, NAME => 'testtb-testExportExpiredSnapshot,1,1734051530087.350758ce3bbce2773a2d63eb3ca1787b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:59:03,544 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
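The entries around this point record the master running DisableTableProcedure (pid=155) and DeleteTableProcedure (pid=161) against testtb-testExportExpiredSnapshot on behalf of the test client. For orientation only, the following is a minimal client-side sketch of how such a drop is typically issued through the standard HBase 2.x Admin API; the connection setup is an illustrative assumption and is not taken from the test code that produced this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      if (admin.tableExists(table)) {
        admin.disableTable(table);   // master runs this as a DisableTableProcedure, as logged above
        admin.deleteTable(table);    // master runs this as a DeleteTableProcedure, archiving HFiles and cleaning hbase:meta
      }
    }
  }
}

The blocking disableTable/deleteTable calls poll the master until the procedure completes, which matches the repeated "MasterRpcServices(1305): Checking to see if procedure is done" lines in this log.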
2024-12-13T00:59:03,544 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051543544"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:03,546 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-13T00:59:03,548 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-13T00:59:03,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 30 msec 2024-12-13T00:59:03,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-13T00:59:03,629 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-13T00:59:03,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-13T00:59:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-13T00:59:03,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-13T00:59:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-13T00:59:03,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-13T00:59:03,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-13T00:59:03,668 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=790 (was 793), OpenFileDescriptor=767 (was 775), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=574 (was 699), ProcessCount=11 (was 20), AvailableMemoryMB=4500 (was 3672) - AvailableMemoryMB LEAK? 
- 2024-12-13T00:59:03,668 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-12-13T00:59:03,687 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=790, OpenFileDescriptor=767, MaxFileDescriptor=1048576, SystemLoadAverage=574, ProcessCount=11, AvailableMemoryMB=4499 2024-12-13T00:59:03,687 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-12-13T00:59:03,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:59:03,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:03,690 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:59:03,690 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:03,690 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-13T00:59:03,691 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:59:03,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T00:59:03,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742186_1362 (size=412) 2024-12-13T00:59:03,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742186_1362 (size=412) 2024-12-13T00:59:03,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742186_1362 (size=412) 2024-12-13T00:59:03,700 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 872532911785fca45e81aafb65ef2378, NAME => 'testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:03,700 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => fcb94816914bd110b655326b2f519743, NAME => 'testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:03,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742188_1364 (size=73) 2024-12-13T00:59:03,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742187_1363 (size=73) 2024-12-13T00:59:03,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742188_1364 (size=73) 2024-12-13T00:59:03,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742187_1363 (size=73) 2024-12-13T00:59:03,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742187_1363 (size=73) 2024-12-13T00:59:03,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742188_1364 (size=73) 2024-12-13T00:59:03,708 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:03,708 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing fcb94816914bd110b655326b2f519743, disabling compactions & flushes 2024-12-13T00:59:03,708 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
after waiting 0 ms 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:03,709 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for fcb94816914bd110b655326b2f519743: 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 872532911785fca45e81aafb65ef2378, disabling compactions & flushes 2024-12-13T00:59:03,709 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. after waiting 0 ms 2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:03,709 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 
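The create request logged at 00:59:03,688 spells out the schema of testtb-testEmptyExportFileSystemState: a single column family 'cf' with VERSIONS => '1', no compression or data block encoding, a 64 KB block size, REGION_REPLICATION => '1', and two regions split at key '1'. As a hedged illustration (not the test's actual code), the same table could be declared with the HBase 2.x descriptor builders roughly as follows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)        // VERSIONS => '1'
          .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
          .build();
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setRegionReplication(1)  // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      // One split key ('1') yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The master turns this request into the CreateTableProcedure (pid=162) whose states (CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, ...) are traced in the surrounding entries.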
2024-12-13T00:59:03,709 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 872532911785fca45e81aafb65ef2378: 2024-12-13T00:59:03,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:59:03,710 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734051543710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051543710"}]},"ts":"1734051543710"} 2024-12-13T00:59:03,710 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734051543710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051543710"}]},"ts":"1734051543710"} 2024-12-13T00:59:03,712 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:59:03,713 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:59:03,713 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051543713"}]},"ts":"1734051543713"} 2024-12-13T00:59:03,714 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-13T00:59:03,717 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:59:03,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:59:03,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:59:03,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:59:03,718 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:59:03,718 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:59:03,718 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:59:03,718 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:59:03,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, ASSIGN}] 2024-12-13T00:59:03,719 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, ASSIGN 2024-12-13T00:59:03,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, ASSIGN 2024-12-13T00:59:03,720 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T00:59:03,720 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:59:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T00:59:03,870 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:59:03,870 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=872532911785fca45e81aafb65ef2378, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:03,870 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=fcb94816914bd110b655326b2f519743, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:03,872 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure fcb94816914bd110b655326b2f519743, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:59:03,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=163, state=RUNNABLE; OpenRegionProcedure 872532911785fca45e81aafb65ef2378, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:59:03,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T00:59:04,024 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:04,024 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:04,027 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,027 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
2024-12-13T00:59:04,027 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 872532911785fca45e81aafb65ef2378, NAME => 'testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:59:04,027 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => fcb94816914bd110b655326b2f519743, NAME => 'testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:59:04,027 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. service=AccessControlService 2024-12-13T00:59:04,027 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. service=AccessControlService 2024-12-13T00:59:04,028 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:59:04,028 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,029 INFO [StoreOpener-fcb94816914bd110b655326b2f519743-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,029 INFO [StoreOpener-872532911785fca45e81aafb65ef2378-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,031 INFO [StoreOpener-fcb94816914bd110b655326b2f519743-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fcb94816914bd110b655326b2f519743 columnFamilyName cf 2024-12-13T00:59:04,031 INFO [StoreOpener-872532911785fca45e81aafb65ef2378-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 872532911785fca45e81aafb65ef2378 columnFamilyName cf 2024-12-13T00:59:04,031 DEBUG [StoreOpener-fcb94816914bd110b655326b2f519743-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:04,031 DEBUG [StoreOpener-872532911785fca45e81aafb65ef2378-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:04,031 INFO [StoreOpener-fcb94816914bd110b655326b2f519743-1 {}] regionserver.HStore(327): Store=fcb94816914bd110b655326b2f519743/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:59:04,031 INFO [StoreOpener-872532911785fca45e81aafb65ef2378-1 {}] regionserver.HStore(327): 
Store=872532911785fca45e81aafb65ef2378/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:59:04,032 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,032 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,033 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,033 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,035 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,035 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,037 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:59:04,037 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:59:04,037 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened fcb94816914bd110b655326b2f519743; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60113620, jitterRate=-0.10423725843429565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:59:04,037 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 872532911785fca45e81aafb65ef2378; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69584993, jitterRate=0.0368971973657608}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:59:04,038 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for fcb94816914bd110b655326b2f519743: 2024-12-13T00:59:04,038 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 872532911785fca45e81aafb65ef2378: 2024-12-13T00:59:04,038 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378., pid=166, masterSystemTime=1734051544024 2024-12-13T00:59:04,038 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743., pid=165, masterSystemTime=1734051544024 2024-12-13T00:59:04,040 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,040 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,040 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=872532911785fca45e81aafb65ef2378, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:04,040 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:04,040 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
2024-12-13T00:59:04,041 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=fcb94816914bd110b655326b2f519743, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:04,042 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=163 2024-12-13T00:59:04,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=163, state=SUCCESS; OpenRegionProcedure 872532911785fca45e81aafb65ef2378, server=9812beb1e4cd,45231,1734051384968 in 168 msec 2024-12-13T00:59:04,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-13T00:59:04,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure fcb94816914bd110b655326b2f519743, server=9812beb1e4cd,46075,1734051384875 in 170 msec 2024-12-13T00:59:04,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, ASSIGN in 325 msec 2024-12-13T00:59:04,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-12-13T00:59:04,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, ASSIGN in 325 msec 2024-12-13T00:59:04,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:59:04,045 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051544045"}]},"ts":"1734051544045"} 2024-12-13T00:59:04,046 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-13T00:59:04,048 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:59:04,049 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-13T00:59:04,050 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T00:59:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:04,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:04,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 370 msec 2024-12-13T00:59:04,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-13T00:59:04,294 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-13T00:59:04,294 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-13T00:59:04,294 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:04,297 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-13T00:59:04,297 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:04,297 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-13T00:59:04,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:59:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051544300 (current time:1734051544300). 2024-12-13T00:59:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:59:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-13T00:59:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:59:04,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x266d562b to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d3cf3f6 2024-12-13T00:59:04,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21b4364, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:04,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:04,306 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:04,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x266d562b to 127.0.0.1:49424 2024-12-13T00:59:04,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:04,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3efe7195 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@543796ff 2024-12-13T00:59:04,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c6fff7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:04,312 DEBUG [hconnection-0xb69ba4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:04,313 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:04,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3efe7195 to 127.0.0.1:49424 2024-12-13T00:59:04,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:04,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-13T00:59:04,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:59:04,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:59:04,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-13T00:59:04,317 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:59:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T00:59:04,318 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:59:04,320 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:59:04,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742189_1365 (size=185) 2024-12-13T00:59:04,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to 
blk_1073742189_1365 (size=185) 2024-12-13T00:59:04,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742189_1365 (size=185) 2024-12-13T00:59:04,327 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:59:04,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743}] 2024-12-13T00:59:04,327 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,328 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T00:59:04,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:04,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:04,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-13T00:59:04,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for fcb94816914bd110b655326b2f519743: 2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 872532911785fca45e81aafb65ef2378: 2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. for emptySnaptb0-testEmptyExportFileSystemState completed. 
2024-12-13T00:59:04,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:59:04,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:59:04,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742190_1366 (size=76) 2024-12-13T00:59:04,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742191_1367 (size=76) 2024-12-13T00:59:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742190_1366 (size=76) 2024-12-13T00:59:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742190_1366 (size=76) 2024-12-13T00:59:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742191_1367 (size=76) 2024-12-13T00:59:04,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-13T00:59:04,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
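The entries above trace the master-side SnapshotProcedure (pid=167) for the empty FLUSH snapshot: prepare, write snapshot info, then one SnapshotRegionProcedure per region, each storing region-info and, since the table is still empty, zero hfile references. For orientation, a minimal client-side sketch of the call that drives such a procedure, using the standard HBase 2.x Admin API; the table and snapshot names are taken from the log, the connection details are assumptions:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    // Minimal sketch: a client-side flush snapshot request.
    // Connection configuration is assumed to come from hbase-site.xml.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (SNAPSHOT_PREPARE ->
      // ... -> SNAPSHOT_COMPLETE_SNAPSHOT) reports the snapshot as finished,
      // which is what the repeated "Checking to see if procedure is done"
      // entries in the log correspond to.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}
```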
2024-12-13T00:59:04,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-13T00:59:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742191_1367 (size=76) 2024-12-13T00:59:04,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-13T00:59:04,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-13T00:59:04,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,489 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,489 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 in 163 msec 2024-12-13T00:59:04,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=167 2024-12-13T00:59:04,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 in 163 msec 2024-12-13T00:59:04,491 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:59:04,492 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:59:04,492 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:59:04,492 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,493 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742192_1368 (size=567) 2024-12-13T00:59:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742192_1368 (size=567) 2024-12-13T00:59:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742192_1368 (size=567) 2024-12-13T00:59:04,503 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:59:04,508 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:59:04,509 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,513 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:59:04,513 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-13T00:59:04,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 197 msec 2024-12-13T00:59:04,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-13T00:59:04,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-13T00:59:04,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-13T00:59:04,620 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-13T00:59:04,620 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-13T00:59:04,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:59:04,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:59:04,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-13T00:59:04,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,631 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:04,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:59:04,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051544643 (current time:1734051544643). 
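The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings a few entries back come from the test loading rows that skip the write-ahead log before the second snapshot is requested. A minimal sketch of such a put, assuming a working connection; the row key and value are hypothetical, while the table, column family `cf` and qualifier `q` match the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      // Hypothetical row and value, written to the cf:q column seen in the log.
      Put put = new Put(Bytes.toBytes("row-1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-1"));
      // Skipping the WAL is faster but unrecoverable if the region server
      // crashes before the memstore is flushed, hence the warning in the log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```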
2024-12-13T00:59:04,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:59:04,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-13T00:59:04,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:59:04,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d6d85b3 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d2b7c12 2024-12-13T00:59:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aa761f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:04,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:04,651 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:04,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d6d85b3 to 127.0.0.1:49424 2024-12-13T00:59:04,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:04,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a730d80 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7e2b30 2024-12-13T00:59:04,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d574b98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:04,662 DEBUG [hconnection-0x64464ddb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:04,663 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:04,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a730d80 to 127.0.0.1:49424 2024-12-13T00:59:04,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:04,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 
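The "Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]" entry shows the master reading back table permissions (read, write, exec, create, admin for user jenkins) before taking the snapshot. A minimal sketch of how such a grant is issued through the public client API, assuming the AccessController coprocessor is enabled; the exact grant call used by this test is not shown in the log, so treat this as an illustration only:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant the RWXCA set the master reads back in the log; family and
      // qualifier are null, i.e. the grant applies to the whole table.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```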
2024-12-13T00:59:04,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:59:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-13T00:59:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-13T00:59:04,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:59:04,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T00:59:04,670 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:59:04,672 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:59:04,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742193_1369 (size=180) 2024-12-13T00:59:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742193_1369 (size=180) 2024-12-13T00:59:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742193_1369 (size=180) 2024-12-13T00:59:04,683 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:59:04,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743}] 2024-12-13T00:59:04,684 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 2024-12-13T00:59:04,684 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock 
for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T00:59:04,836 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:04,836 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:04,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-13T00:59:04,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-13T00:59:04,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:04,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:04,837 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 872532911785fca45e81aafb65ef2378 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-13T00:59:04,837 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing fcb94816914bd110b655326b2f519743 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-13T00:59:04,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/.tmp/cf/27a56663c8314476a1d47ad250c6d6bf is 71, key is 118148d50d4c211570312a58c726e5c2/cf:q/1734051544628/Put/seqid=0 2024-12-13T00:59:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742194_1370 (size=8188) 2024-12-13T00:59:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742194_1370 (size=8188) 2024-12-13T00:59:04,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/.tmp/cf/591e9eaba9d946b5bb097b48e9bf9a69 is 71, key is 05035747e8772fd81dade039377cb92f/cf:q/1734051544627/Put/seqid=0 2024-12-13T00:59:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742194_1370 
(size=8188) 2024-12-13T00:59:04,860 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/.tmp/cf/27a56663c8314476a1d47ad250c6d6bf 2024-12-13T00:59:04,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/.tmp/cf/27a56663c8314476a1d47ad250c6d6bf as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf 2024-12-13T00:59:04,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742195_1371 (size=5422) 2024-12-13T00:59:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742195_1371 (size=5422) 2024-12-13T00:59:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742195_1371 (size=5422) 2024-12-13T00:59:04,870 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/.tmp/cf/591e9eaba9d946b5bb097b48e9bf9a69 2024-12-13T00:59:04,871 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf, entries=45, sequenceid=6, filesize=8.0 K 2024-12-13T00:59:04,872 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for fcb94816914bd110b655326b2f519743 in 34ms, sequenceid=6, compaction requested=false 2024-12-13T00:59:04,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-13T00:59:04,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for fcb94816914bd110b655326b2f519743: 2024-12-13T00:59:04,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
for snaptb0-testEmptyExportFileSystemState completed. 2024-12-13T00:59:04,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:04,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf] hfiles 2024-12-13T00:59:04,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/.tmp/cf/591e9eaba9d946b5bb097b48e9bf9a69 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69 2024-12-13T00:59:04,880 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69, entries=5, sequenceid=6, filesize=5.3 K 2024-12-13T00:59:04,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 872532911785fca45e81aafb65ef2378 in 44ms, sequenceid=6, compaction requested=false 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 872532911785fca45e81aafb65ef2378: 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69] hfiles 2024-12-13T00:59:04,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:04,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742196_1372 (size=115) 2024-12-13T00:59:04,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742196_1372 (size=115) 2024-12-13T00:59:04,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742196_1372 (size=115) 2024-12-13T00:59:04,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742197_1373 (size=115) 2024-12-13T00:59:04,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742197_1373 (size=115) 2024-12-13T00:59:04,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742197_1373 (size=115) 2024-12-13T00:59:04,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 
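At this point the FLUSH snapshot has forced each region's memstore to disk (a 5.3 K hfile for region 872532911785fca45e81aafb65ef2378 and an 8.0 K hfile for fcb94816914bd110b655326b2f519743), committed the files from .tmp into the column-family directory, and recorded references to them in the snapshot manifest. A minimal client-side check, assuming a working connection, is simply to list the snapshots the master now knows about:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Once the procedures above finish, both emptySnaptb0-... and snaptb0-...
      // should appear in this listing.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription s : snapshots) {
        System.out.println(s.getName());
      }
    }
  }
}
```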
2024-12-13T00:59:04,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-13T00:59:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-13T00:59:04,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,891 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:04,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 872532911785fca45e81aafb65ef2378 in 208 msec 2024-12-13T00:59:04,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T00:59:05,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T00:59:05,283 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:05,283 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-13T00:59:05,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-13T00:59:05,283 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region fcb94816914bd110b655326b2f519743 2024-12-13T00:59:05,283 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 2024-12-13T00:59:05,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-13T00:59:05,286 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:59:05,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure fcb94816914bd110b655326b2f519743 in 601 msec 2024-12-13T00:59:05,286 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:59:05,287 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:59:05,287 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:05,287 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:05,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742198_1374 (size=645) 2024-12-13T00:59:05,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742198_1374 (size=645) 2024-12-13T00:59:05,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742198_1374 (size=645) 2024-12-13T00:59:05,298 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:59:05,302 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:59:05,303 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:05,304 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:59:05,304 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-13T00:59:05,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 637 msec 2024-12-13T00:59:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-13T00:59:05,773 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): 
Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-13T00:59:05,773 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773 2024-12-13T00:59:05,773 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:05,811 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:05,811 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:05,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:59:05,817 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742199_1375 (size=185) 2024-12-13T00:59:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742199_1375 (size=185) 2024-12-13T00:59:05,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742199_1375 (size=185) 2024-12-13T00:59:05,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742200_1376 (size=567) 2024-12-13T00:59:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742200_1376 (size=567) 2024-12-13T00:59:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742200_1376 (size=567) 2024-12-13T00:59:05,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:05,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:05,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:05,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-15700198359618109177.jar 2024-12-13T00:59:06,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-11360559810699010463.jar 2024-12-13T00:59:06,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:59:06,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:59:06,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
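The export phase above first verifies the source snapshot and copies its manifest to the target directory, then resolves, through TableMapReduceUtil, the jar that provides each class the MapReduce copy job will need ("For class ..., using jar ..."). A minimal sketch of driving the same export tool programmatically; the snapshot name is taken from the log, while the destination URI is a hypothetical placeholder (the test builds a per-run export directory):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Export the snapshot (manifest plus referenced hfiles) to another
    // file system location; -copy-to points at the hypothetical destination.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:46775/export-test/export-dest"
    });
    System.exit(rc);
  }
}
```

The equivalent command-line form is `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>`.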
2024-12-13T00:59:06,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:59:06,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:06,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:06,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:06,939 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:06,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:06,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:06,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:06,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742201_1377 (size=127628) 2024-12-13T00:59:06,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742201_1377 (size=127628) 2024-12-13T00:59:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742201_1377 (size=127628) 2024-12-13T00:59:07,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T00:59:07,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T00:59:07,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742202_1378 (size=2172137) 2024-12-13T00:59:07,020 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742203_1379 (size=213228) 2024-12-13T00:59:07,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742203_1379 (size=213228) 2024-12-13T00:59:07,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742203_1379 (size=213228) 2024-12-13T00:59:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T00:59:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T00:59:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742204_1380 (size=1877034) 2024-12-13T00:59:07,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742205_1381 (size=533455) 2024-12-13T00:59:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742205_1381 (size=533455) 2024-12-13T00:59:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742205_1381 (size=533455) 2024-12-13T00:59:07,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T00:59:07,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T00:59:07,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742206_1382 (size=7280644) 2024-12-13T00:59:07,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742207_1383 (size=6350924) 2024-12-13T00:59:07,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742207_1383 (size=6350924) 2024-12-13T00:59:07,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742207_1383 (size=6350924) 2024-12-13T00:59:07,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742208_1384 (size=4188619) 2024-12-13T00:59:07,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742208_1384 (size=4188619) 2024-12-13T00:59:07,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742208_1384 (size=4188619) 2024-12-13T00:59:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742209_1385 (size=20406) 2024-12-13T00:59:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742209_1385 (size=20406) 
2024-12-13T00:59:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742209_1385 (size=20406) 2024-12-13T00:59:07,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742210_1386 (size=75495) 2024-12-13T00:59:07,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742210_1386 (size=75495) 2024-12-13T00:59:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742210_1386 (size=75495) 2024-12-13T00:59:07,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742211_1387 (size=45609) 2024-12-13T00:59:07,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742211_1387 (size=45609) 2024-12-13T00:59:07,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742211_1387 (size=45609) 2024-12-13T00:59:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742212_1388 (size=110084) 2024-12-13T00:59:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742212_1388 (size=110084) 2024-12-13T00:59:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742212_1388 (size=110084) 2024-12-13T00:59:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742213_1389 (size=1323991) 2024-12-13T00:59:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742213_1389 (size=1323991) 2024-12-13T00:59:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742213_1389 (size=1323991) 2024-12-13T00:59:07,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742214_1390 (size=23076) 2024-12-13T00:59:07,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742214_1390 (size=23076) 2024-12-13T00:59:07,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742214_1390 (size=23076) 2024-12-13T00:59:07,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742215_1391 (size=126803) 2024-12-13T00:59:07,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742215_1391 (size=126803) 2024-12-13T00:59:07,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742215_1391 (size=126803) 2024-12-13T00:59:07,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742216_1392 (size=322274) 
2024-12-13T00:59:07,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742216_1392 (size=322274) 2024-12-13T00:59:07,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742216_1392 (size=322274) 2024-12-13T00:59:07,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T00:59:07,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T00:59:07,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742217_1393 (size=1832290) 2024-12-13T00:59:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742218_1394 (size=30081) 2024-12-13T00:59:07,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742218_1394 (size=30081) 2024-12-13T00:59:07,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742218_1394 (size=30081) 2024-12-13T00:59:07,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742219_1395 (size=53616) 2024-12-13T00:59:07,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742219_1395 (size=53616) 2024-12-13T00:59:07,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742219_1395 (size=53616) 2024-12-13T00:59:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742220_1396 (size=29229) 2024-12-13T00:59:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742220_1396 (size=29229) 2024-12-13T00:59:07,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742220_1396 (size=29229) 2024-12-13T00:59:07,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742221_1397 (size=169089) 2024-12-13T00:59:07,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742221_1397 (size=169089) 2024-12-13T00:59:07,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742221_1397 (size=169089) 2024-12-13T00:59:07,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742222_1398 (size=451756) 2024-12-13T00:59:07,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742222_1398 (size=451756) 2024-12-13T00:59:07,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742222_1398 (size=451756) 
2024-12-13T00:59:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T00:59:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T00:59:07,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742223_1399 (size=5175431) 2024-12-13T00:59:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742224_1400 (size=136454) 2024-12-13T00:59:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742224_1400 (size=136454) 2024-12-13T00:59:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742224_1400 (size=136454) 2024-12-13T00:59:07,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742225_1401 (size=907855) 2024-12-13T00:59:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742225_1401 (size=907855) 2024-12-13T00:59:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742225_1401 (size=907855) 2024-12-13T00:59:07,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T00:59:07,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T00:59:07,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742226_1402 (size=3317408) 2024-12-13T00:59:07,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742227_1403 (size=503880) 2024-12-13T00:59:07,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742227_1403 (size=503880) 2024-12-13T00:59:07,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742227_1403 (size=503880) 2024-12-13T00:59:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T00:59:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T00:59:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742228_1404 (size=4695811) 2024-12-13T00:59:07,324 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
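The WARN from mapreduce.JobResourceUploader(481) just above is Hadoop noting that the submitted job has no job jar attached, so user classes may not be found on the cluster. A minimal sketch, with a hypothetical driver class, of the call a driver would normally make to avoid that warning:

    // Illustrative sketch only; the driver class name is hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "job-jar-sketch");
        job.setJarByClass(JobJarSketch.class);  // ships the jar containing this class with the job
        // ... mapper/reducer/input/output setup would follow in a real driver ...
      }
    }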
2024-12-13T00:59:07,327 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-13T00:59:07,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742229_1405 (size=7) 2024-12-13T00:59:07,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742229_1405 (size=7) 2024-12-13T00:59:07,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742229_1405 (size=7) 2024-12-13T00:59:07,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742230_1406 (size=10) 2024-12-13T00:59:07,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742230_1406 (size=10) 2024-12-13T00:59:07,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742230_1406 (size=10) 2024-12-13T00:59:07,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742231_1407 (size=304790) 2024-12-13T00:59:07,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742231_1407 (size=304790) 2024-12-13T00:59:07,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742231_1407 (size=304790) 2024-12-13T00:59:07,372 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:59:07,372 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:59:07,403 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0007_000001 (auth:SIMPLE) from 127.0.0.1:50794 2024-12-13T00:59:08,881 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:59:13,026 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0007_000001 (auth:SIMPLE) from 127.0.0.1:49678 2024-12-13T00:59:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742232_1408 (size=350440) 2024-12-13T00:59:13,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742232_1408 (size=350440) 2024-12-13T00:59:13,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742232_1408 (size=350440) 2024-12-13T00:59:14,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742233_1409 (size=8568) 2024-12-13T00:59:14,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742233_1409 (size=8568) 2024-12-13T00:59:14,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742233_1409 (size=8568) 2024-12-13T00:59:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742234_1410 (size=460) 2024-12-13T00:59:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742234_1410 (size=460) 2024-12-13T00:59:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742234_1410 (size=460) 2024-12-13T00:59:14,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742235_1411 (size=8568) 2024-12-13T00:59:14,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742235_1411 (size=8568) 2024-12-13T00:59:14,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742235_1411 (size=8568) 2024-12-13T00:59:14,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742236_1412 (size=350440) 2024-12-13T00:59:14,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742236_1412 (size=350440) 2024-12-13T00:59:14,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742236_1412 (size=350440) 2024-12-13T00:59:15,480 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T00:59:15,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
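The ExportSnapshot(658/1207/1218) entries in this stretch bracket an export of the 'emptySnaptb0-testEmptyExportFileSystemState' snapshot run as a MapReduce job; the YARN app attempt and block writes above are its side effects. A minimal sketch, assuming a placeholder destination URI, of driving the same tool from a standalone client:

    // Illustrative sketch only; the copy-to URI is a placeholder, not taken from the log.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
                "-copy-to", "hdfs://target-cluster:8020/hbase"  // placeholder destination
            });
        System.exit(rc);
      }
    }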
2024-12-13T00:59:15,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:15,486 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T00:59:15,487 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T00:59:15,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:15,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-13T00:59:15,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-13T00:59:15,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:15,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-13T00:59:15,488 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051545773/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-13T00:59:15,495 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,496 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T00:59:15,500 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051555499"}]},"ts":"1734051555499"} 2024-12-13T00:59:15,502 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-13T00:59:15,504 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-13T00:59:15,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-13T00:59:15,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, UNASSIGN}] 2024-12-13T00:59:15,509 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, UNASSIGN 2024-12-13T00:59:15,509 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, UNASSIGN 2024-12-13T00:59:15,510 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=fcb94816914bd110b655326b2f519743, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:15,510 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=872532911785fca45e81aafb65ef2378, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:15,512 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:59:15,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=175, state=RUNNABLE; CloseRegionProcedure 872532911785fca45e81aafb65ef2378, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T00:59:15,513 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T00:59:15,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE; CloseRegionProcedure fcb94816914bd110b655326b2f519743, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:59:15,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T00:59:15,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T00:59:15,665 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:15,665 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:59:15,665 DEBUG 
[RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 872532911785fca45e81aafb65ef2378, disabling compactions & flushes 2024-12-13T00:59:15,665 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:15,665 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:15,665 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. after waiting 0 ms 2024-12-13T00:59:15,665 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:15,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:15,667 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close fcb94816914bd110b655326b2f519743 2024-12-13T00:59:15,667 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T00:59:15,667 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing fcb94816914bd110b655326b2f519743, disabling compactions & flushes 2024-12-13T00:59:15,667 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:15,667 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 2024-12-13T00:59:15,667 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. after waiting 0 ms 2024-12-13T00:59:15,667 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
2024-12-13T00:59:15,672 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:59:15,673 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:59:15,673 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378. 2024-12-13T00:59:15,673 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 872532911785fca45e81aafb65ef2378: 2024-12-13T00:59:15,675 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T00:59:15,676 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 872532911785fca45e81aafb65ef2378 2024-12-13T00:59:15,676 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T00:59:15,677 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743. 
2024-12-13T00:59:15,677 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for fcb94816914bd110b655326b2f519743: 2024-12-13T00:59:15,677 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=872532911785fca45e81aafb65ef2378, regionState=CLOSED 2024-12-13T00:59:15,678 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed fcb94816914bd110b655326b2f519743 2024-12-13T00:59:15,678 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=fcb94816914bd110b655326b2f519743, regionState=CLOSED 2024-12-13T00:59:15,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=175 2024-12-13T00:59:15,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=175, state=SUCCESS; CloseRegionProcedure 872532911785fca45e81aafb65ef2378, server=9812beb1e4cd,45231,1734051384968 in 166 msec 2024-12-13T00:59:15,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=176 2024-12-13T00:59:15,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=872532911785fca45e81aafb65ef2378, UNASSIGN in 174 msec 2024-12-13T00:59:15,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=176, state=SUCCESS; CloseRegionProcedure fcb94816914bd110b655326b2f519743, server=9812beb1e4cd,46075,1734051384875 in 167 msec 2024-12-13T00:59:15,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-13T00:59:15,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fcb94816914bd110b655326b2f519743, UNASSIGN in 176 msec 2024-12-13T00:59:15,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-13T00:59:15,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 179 msec 2024-12-13T00:59:15,687 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051555686"}]},"ts":"1734051555686"} 2024-12-13T00:59:15,688 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-13T00:59:15,690 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-13T00:59:15,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 194 msec 2024-12-13T00:59:15,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-13T00:59:15,802 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-13T00:59:15,802 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,804 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,805 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,807 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,809 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378 2024-12-13T00:59:15,809 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743 2024-12-13T00:59:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T00:59:15,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T00:59:15,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T00:59:15,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-13T00:59:15,812 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/recovered.edits] 2024-12-13T00:59:15,813 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/recovered.edits] 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:15,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:15,815 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:15,816 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:15,816 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:15,817 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:15,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T00:59:15,820 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69 to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/cf/591e9eaba9d946b5bb097b48e9bf9a69 2024-12-13T00:59:15,820 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/cf/27a56663c8314476a1d47ad250c6d6bf 2024-12-13T00:59:15,824 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378/recovered.edits/9.seqid 2024-12-13T00:59:15,825 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743/recovered.edits/9.seqid 2024-12-13T00:59:15,825 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/872532911785fca45e81aafb65ef2378 2024-12-13T00:59:15,825 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testEmptyExportFileSystemState/fcb94816914bd110b655326b2f519743 2024-12-13T00:59:15,825 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-13T00:59:15,828 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,831 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-13T00:59:15,834 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-13T00:59:15,835 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,835 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-13T00:59:15,836 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051555835"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:15,836 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051555835"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:15,840 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T00:59:15,840 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 872532911785fca45e81aafb65ef2378, NAME => 'testtb-testEmptyExportFileSystemState,,1734051543688.872532911785fca45e81aafb65ef2378.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fcb94816914bd110b655326b2f519743, NAME => 'testtb-testEmptyExportFileSystemState,1,1734051543688.fcb94816914bd110b655326b2f519743.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T00:59:15,840 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
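The DisableTableProcedure (pid=173), DeleteTableProcedure (pid=179) and the snapshot deletions recorded around here are driven by ordinary client Admin calls issued by the test harness. A minimal sketch of equivalent standalone client calls, not the test's actual code, assuming a default client configuration:

    // Illustrative sketch only; the test itself goes through its testing utility,
    // this just shows the Admin operations that correspond to the procedures above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          admin.disableTable(table);   // corresponds to DisableTableProcedure
          admin.deleteTable(table);    // corresponds to DeleteTableProcedure
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }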
2024-12-13T00:59:15,840 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051555840"}]},"ts":"9223372036854775807"} 2024-12-13T00:59:15,844 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-13T00:59:15,846 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-13T00:59:15,847 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 44 msec 2024-12-13T00:59:15,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-13T00:59:15,918 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-13T00:59:15,925 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-13T00:59:15,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:15,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-13T00:59:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-13T00:59:15,956 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=806 (was 790) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:43849 from appattempt_1734051393214_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:45843 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2084191314_1 at /127.0.0.1:41030 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:41054 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 6872) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:43322 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:35346 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5541 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:45729 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45729 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2084191314_1 at /127.0.0.1:35334 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=791 (was 767) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=548 (was 574), ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=3745 (was 4499) 2024-12-13T00:59:15,956 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-13T00:59:15,980 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=806, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=548, ProcessCount=17, AvailableMemoryMB=3743 2024-12-13T00:59:15,980 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-13T00:59:15,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T00:59:15,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-13T00:59:15,984 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T00:59:15,984 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:15,984 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-13T00:59:15,985 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T00:59:15,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T00:59:15,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742237_1413 (size=404) 2024-12-13T00:59:15,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742237_1413 (size=404) 2024-12-13T00:59:15,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742237_1413 (size=404) 2024-12-13T00:59:16,000 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 779755e67e6754487b988f6d1c68ab8b, NAME => 'testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:16,000 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 948ff07492d339d962636cab7d38112b, NAME => 'testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:16,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742238_1414 (size=65) 2024-12-13T00:59:16,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742238_1414 (size=65) 2024-12-13T00:59:16,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742238_1414 (size=65) 2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 779755e67e6754487b988f6d1c68ab8b, disabling compactions & flushes 2024-12-13T00:59:16,030 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. after waiting 0 ms 2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,030 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 
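For reference, the create-table request logged at 00:59:15,982 above ('testtb-testExportWithChecksum', REGION_REPLICATION => '1', family 'cf' with VERSIONS => '1' and BLOCKSIZE => 65536) can be issued from a client roughly as follows. This is a minimal sketch, assuming the standard HBase 2.x Admin API; the class name, connection setup (hbase-site.xml on the classpath), and the split key are illustrative, chosen only to match the two regions ('' -> '1', '1' -> '') created in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    // Cluster coordinates are taken from hbase-site.xml on the classpath (assumption).
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the descriptor logged above: REGION_REPLICATION => '1',
      // family 'cf' with VERSIONS => '1' and BLOCKSIZE => 65536.
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBlocksize(65536)
              .build());
      // Pre-split at row key '1', matching the two regions seen in the log.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```

The synchronous createTable call returns once the master-side CreateTableProcedure finishes, which is what the later "Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed" line records from the client's point of view.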
2024-12-13T00:59:16,030 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 779755e67e6754487b988f6d1c68ab8b: 2024-12-13T00:59:16,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742239_1415 (size=65) 2024-12-13T00:59:16,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742239_1415 (size=65) 2024-12-13T00:59:16,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742239_1415 (size=65) 2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 948ff07492d339d962636cab7d38112b, disabling compactions & flushes 2024-12-13T00:59:16,036 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. after waiting 0 ms 2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:16,036 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 
2024-12-13T00:59:16,036 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 948ff07492d339d962636cab7d38112b: 2024-12-13T00:59:16,037 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T00:59:16,038 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051556037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051556037"}]},"ts":"1734051556037"} 2024-12-13T00:59:16,038 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734051556037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051556037"}]},"ts":"1734051556037"} 2024-12-13T00:59:16,041 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T00:59:16,042 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T00:59:16,042 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051556042"}]},"ts":"1734051556042"} 2024-12-13T00:59:16,043 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-13T00:59:16,047 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T00:59:16,049 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T00:59:16,049 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T00:59:16,049 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T00:59:16,049 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T00:59:16,049 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T00:59:16,049 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T00:59:16,049 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T00:59:16,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, ASSIGN}] 2024-12-13T00:59:16,051 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, ASSIGN 2024-12-13T00:59:16,051 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, ASSIGN 2024-12-13T00:59:16,052 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, ASSIGN; state=OFFLINE, location=9812beb1e4cd,40687,1734051385054; forceNewPlan=false, retain=false 2024-12-13T00:59:16,052 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T00:59:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T00:59:16,203 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T00:59:16,203 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=779755e67e6754487b988f6d1c68ab8b, regionState=OPENING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:59:16,203 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=948ff07492d339d962636cab7d38112b, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:16,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE; OpenRegionProcedure 779755e67e6754487b988f6d1c68ab8b, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T00:59:16,207 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE; OpenRegionProcedure 948ff07492d339d962636cab7d38112b, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T00:59:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T00:59:16,358 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:16,358 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:59:16,362 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 
2024-12-13T00:59:16,362 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 948ff07492d339d962636cab7d38112b, NAME => 'testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T00:59:16,363 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. service=AccessControlService 2024-12-13T00:59:16,363 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T00:59:16,363 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,363 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:16,363 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,363 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,364 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 779755e67e6754487b988f6d1c68ab8b, NAME => 'testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. service=AccessControlService 2024-12-13T00:59:16,364 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,364 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,368 INFO [StoreOpener-948ff07492d339d962636cab7d38112b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,368 INFO [StoreOpener-779755e67e6754487b988f6d1c68ab8b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,370 INFO [StoreOpener-948ff07492d339d962636cab7d38112b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 948ff07492d339d962636cab7d38112b columnFamilyName cf 2024-12-13T00:59:16,370 INFO [StoreOpener-779755e67e6754487b988f6d1c68ab8b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 779755e67e6754487b988f6d1c68ab8b columnFamilyName cf 2024-12-13T00:59:16,370 DEBUG [StoreOpener-948ff07492d339d962636cab7d38112b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:16,370 DEBUG [StoreOpener-779755e67e6754487b988f6d1c68ab8b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T00:59:16,370 INFO [StoreOpener-948ff07492d339d962636cab7d38112b-1 {}] regionserver.HStore(327): Store=948ff07492d339d962636cab7d38112b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:59:16,370 INFO [StoreOpener-779755e67e6754487b988f6d1c68ab8b-1 {}] regionserver.HStore(327): Store=779755e67e6754487b988f6d1c68ab8b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T00:59:16,371 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,371 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,371 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,371 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,373 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,373 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,375 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:59:16,375 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T00:59:16,376 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 
948ff07492d339d962636cab7d38112b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75094624, jitterRate=0.11899709701538086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:59:16,376 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 779755e67e6754487b988f6d1c68ab8b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64365753, jitterRate=-0.04087553918361664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T00:59:16,376 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 948ff07492d339d962636cab7d38112b: 2024-12-13T00:59:16,376 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 779755e67e6754487b988f6d1c68ab8b: 2024-12-13T00:59:16,377 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b., pid=184, masterSystemTime=1734051556358 2024-12-13T00:59:16,377 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b., pid=183, masterSystemTime=1734051556358 2024-12-13T00:59:16,379 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,379 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,379 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=779755e67e6754487b988f6d1c68ab8b, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T00:59:16,380 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:16,380 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 
2024-12-13T00:59:16,380 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=948ff07492d339d962636cab7d38112b, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:16,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=181 2024-12-13T00:59:16,382 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=181, state=SUCCESS; OpenRegionProcedure 779755e67e6754487b988f6d1c68ab8b, server=9812beb1e4cd,40687,1734051385054 in 174 msec 2024-12-13T00:59:16,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=182 2024-12-13T00:59:16,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=182, state=SUCCESS; OpenRegionProcedure 948ff07492d339d962636cab7d38112b, server=9812beb1e4cd,46075,1734051384875 in 175 msec 2024-12-13T00:59:16,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, ASSIGN in 333 msec 2024-12-13T00:59:16,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=180 2024-12-13T00:59:16,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, ASSIGN in 337 msec 2024-12-13T00:59:16,388 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T00:59:16,388 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051556388"}]},"ts":"1734051556388"} 2024-12-13T00:59:16,389 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-13T00:59:16,392 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T00:59:16,392 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-13T00:59:16,394 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T00:59:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-13T00:59:16,404 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 417 msec 2024-12-13T00:59:16,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-13T00:59:16,589 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-13T00:59:16,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. 
Timeout = 60000ms 2024-12-13T00:59:16,589 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:16,594 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-13T00:59:16,594 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:16,594 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-13T00:59:16,598 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T00:59:16,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051556598 (current time:1734051556598). 2024-12-13T00:59:16,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:59:16,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-13T00:59:16,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:59:16,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d5d4dd8 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41a05a7d 2024-12-13T00:59:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@436ae45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:16,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:16,605 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:16,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d5d4dd8 to 127.0.0.1:49424 2024-12-13T00:59:16,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:16,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x615d8a53 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@226d47b7 2024-12-13T00:59:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13188139, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:16,612 DEBUG [hconnection-0x6cc88621-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:16,613 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x615d8a53 to 127.0.0.1:49424 2024-12-13T00:59:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:16,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T00:59:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T00:59:16,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T00:59:16,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-13T00:59:16,620 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:59:16,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-13T00:59:16,621 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:59:16,623 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:59:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742240_1416 (size=161) 2024-12-13T00:59:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742240_1416 (size=161) 2024-12-13T00:59:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added 
to blk_1073742240_1416 (size=161) 2024-12-13T00:59:16,639 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:59:16,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b}] 2024-12-13T00:59:16,640 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,640 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-13T00:59:16,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:16,792 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:59:16,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-13T00:59:16,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 948ff07492d339d962636cab7d38112b: 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 779755e67e6754487b988f6d1c68ab8b: 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. for emptySnaptb0-testExportWithChecksum completed. 
2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. for emptySnaptb0-testExportWithChecksum completed. 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:59:16,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T00:59:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742242_1418 (size=68) 2024-12-13T00:59:16,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742242_1418 (size=68) 2024-12-13T00:59:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742242_1418 (size=68) 2024-12-13T00:59:16,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 
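The emptySnaptb0-testExportWithChecksum snapshot being taken above was requested as a FLUSH-type snapshot ("type=FLUSH ttl=0"). A minimal sketch, assuming the standard HBase 2.x Admin API with the same illustrative connection setup as in the earlier sketch, of how a client triggers such a snapshot:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request logged above.
      // The call blocks until the master-side SnapshotProcedure (pid=185 above) completes.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}
```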
2024-12-13T00:59:16,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-13T00:59:16,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-13T00:59:16,811 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,811 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:16,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b in 173 msec 2024-12-13T00:59:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742241_1417 (size=68) 2024-12-13T00:59:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742241_1417 (size=68) 2024-12-13T00:59:16,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742241_1417 (size=68) 2024-12-13T00:59:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-13T00:59:16,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-13T00:59:16,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,825 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:16,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-13T00:59:16,828 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:59:16,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b in 187 msec 2024-12-13T00:59:16,828 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum 
table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:59:16,829 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:59:16,829 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-13T00:59:16,830 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-13T00:59:16,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742243_1419 (size=543) 2024-12-13T00:59:16,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742243_1419 (size=543) 2024-12-13T00:59:16,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742243_1419 (size=543) 2024-12-13T00:59:16,848 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:59:16,853 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:59:16,853 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-13T00:59:16,854 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:59:16,855 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-13T00:59:16,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 237 msec 2024-12-13T00:59:16,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=185 2024-12-13T00:59:16,924 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-13T00:59:16,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40687 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:59:16,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T00:59:16,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-13T00:59:16,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:16,948 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T00:59:16,974 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T00:59:16,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051556974 (current time:1734051556974). 2024-12-13T00:59:16,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T00:59:16,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-13T00:59:16,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T00:59:16,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e9468c2 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@564510bf 2024-12-13T00:59:16,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630db9d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:16,987 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:16,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e9468c2 to 127.0.0.1:49424 2024-12-13T00:59:16,989 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06448f1f to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78faa780 2024-12-13T00:59:16,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b75276, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T00:59:17,013 DEBUG [hconnection-0x7488af32-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T00:59:17,014 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59788, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T00:59:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06448f1f to 127.0.0.1:49424 2024-12-13T00:59:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T00:59:17,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-13T00:59:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-13T00:59:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-13T00:59:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-13T00:59:17,023 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T00:59:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-13T00:59:17,025 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T00:59:17,027 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T00:59:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742244_1420 (size=156) 2024-12-13T00:59:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742244_1420 (size=156) 2024-12-13T00:59:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742244_1420 (size=156) 2024-12-13T00:59:17,044 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T00:59:17,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b}] 2024-12-13T00:59:17,046 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:17,046 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:17,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 
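The entries above — the stored SnapshotProcedure (pid=188) and the client's repeated "Checking to see if procedure is done" polls — are the master-side view of a FLUSH snapshot request. The test's client helper code is not part of this log; as a minimal sketch only, assuming the standard HBase 2.x Admin API, the same request would look roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a FLUSH-type snapshot of the table; the call blocks until the
          // master reports the snapshot procedure as done, which is what drives the
          // periodic "Checking to see if procedure is done" requests seen above.
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"),
              SnapshotType.FLUSH);
        }
      }
    }

SnapshotType.FLUSH corresponds to the type=FLUSH field in the snapshot descriptor logged by the master.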
2024-12-13T00:59:17,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T00:59:17,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T00:59:17,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-13T00:59:17,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40687 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-13T00:59:17,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:17,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T00:59:17,198 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 779755e67e6754487b988f6d1c68ab8b 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-13T00:59:17,198 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 948ff07492d339d962636cab7d38112b 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-13T00:59:17,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/.tmp/cf/e5b9d1575bf249a19cbf62752425f91f is 69, key is 0b3991560e07d7248da84f2b307d1c578/cf:q/1734051556937/Put/seqid=0 2024-12-13T00:59:17,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742245_1421 (size=5149) 2024-12-13T00:59:17,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742245_1421 (size=5149) 2024-12-13T00:59:17,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742245_1421 (size=5149) 2024-12-13T00:59:17,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/.tmp/cf/ce80e03c98134cf3b8fc326920a336ee is 71, key is 12a78fa37e21afc3e897655b305761dd/cf:q/1734051556941/Put/seqid=0 2024-12-13T00:59:17,222 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/.tmp/cf/e5b9d1575bf249a19cbf62752425f91f 2024-12-13T00:59:17,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/.tmp/cf/e5b9d1575bf249a19cbf62752425f91f as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f 2024-12-13T00:59:17,236 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f, entries=1, sequenceid=6, filesize=5.0 K 2024-12-13T00:59:17,237 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 779755e67e6754487b988f6d1c68ab8b in 39ms, sequenceid=6, compaction requested=false 2024-12-13T00:59:17,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-13T00:59:17,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 779755e67e6754487b988f6d1c68ab8b: 2024-12-13T00:59:17,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. for snaptb0-testExportWithChecksum completed. 2024-12-13T00:59:17,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-13T00:59:17,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:17,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f] hfiles 2024-12-13T00:59:17,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f for snapshot=snaptb0-testExportWithChecksum 2024-12-13T00:59:17,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742246_1422 (size=8460) 2024-12-13T00:59:17,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742246_1422 (size=8460) 2024-12-13T00:59:17,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742246_1422 (size=8460) 2024-12-13T00:59:17,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/.tmp/cf/ce80e03c98134cf3b8fc326920a336ee 2024-12-13T00:59:17,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/.tmp/cf/ce80e03c98134cf3b8fc326920a336ee as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee 2024-12-13T00:59:17,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee, entries=49, sequenceid=6, filesize=8.3 K 2024-12-13T00:59:17,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 948ff07492d339d962636cab7d38112b in 74ms, sequenceid=6, compaction requested=false 2024-12-13T00:59:17,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 
948ff07492d339d962636cab7d38112b: 2024-12-13T00:59:17,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. for snaptb0-testExportWithChecksum completed. 2024-12-13T00:59:17,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-13T00:59:17,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T00:59:17,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee] hfiles 2024-12-13T00:59:17,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee for snapshot=snaptb0-testExportWithChecksum 2024-12-13T00:59:17,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742247_1423 (size=107) 2024-12-13T00:59:17,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742247_1423 (size=107) 2024-12-13T00:59:17,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742247_1423 (size=107) 2024-12-13T00:59:17,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 
2024-12-13T00:59:17,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-13T00:59:17,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-13T00:59:17,291 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:17,291 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b 2024-12-13T00:59:17,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 779755e67e6754487b988f6d1c68ab8b in 247 msec 2024-12-13T00:59:17,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742248_1424 (size=107) 2024-12-13T00:59:17,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742248_1424 (size=107) 2024-12-13T00:59:17,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742248_1424 (size=107) 2024-12-13T00:59:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T00:59:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-13T00:59:17,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-13T00:59:17,315 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:17,315 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b 2024-12-13T00:59:17,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-13T00:59:17,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 948ff07492d339d962636cab7d38112b in 272 msec 2024-12-13T00:59:17,319 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T00:59:17,320 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T00:59:17,320 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T00:59:17,320 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-13T00:59:17,321 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T00:59:17,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-13T00:59:17,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742249_1425 (size=621) 2024-12-13T00:59:17,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742249_1425 (size=621) 2024-12-13T00:59:17,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742249_1425 (size=621) 2024-12-13T00:59:17,361 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T00:59:17,366 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T00:59:17,367 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T00:59:17,368 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T00:59:17,368 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-13T00:59:17,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 347 msec 2024-12-13T00:59:17,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-13T00:59:17,628 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-13T00:59:17,629 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628 2024-12-13T00:59:17,629 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:17,665 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:17,665 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@6d668592, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T00:59:17,666 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
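The two ExportSnapshot entries above record the tool's resolved inputs: inputFs/inputRoot on the source HDFS, outputFs/outputRoot under the local-export-... test directory, skipTmp=false, and the initial output snapshot dir. As a rough programmatic equivalent (a sketch only; the file:///tmp/local-export destination below is a placeholder, not the test's per-run path), the same export can be driven through Hadoop's ToolRunner:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and the hfiles it references from the source
        // cluster's root directory to the destination filesystem; the placeholder
        // file:// path stands in for the local export directory seen in the log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"
        });
        System.exit(rc);
      }
    }

The -copy-to argument accepts any Hadoop FileSystem URI, which is why the test can point the export at a local file:// directory (tgtFsUri=file:/// above) as well as at HDFS.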
2024-12-13T00:59:17,670 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T00:59:17,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:17,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:17,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:17,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:18,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-16680170229097002680.jar 2024-12-13T00:59:18,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:18,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-7295376271022339025.jar 2024-12-13T00:59:19,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,050 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,050 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:19,050 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:59:19,051 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:59:19,051 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:59:19,051 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:59:19,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:59:19,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:59:19,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:59:19,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:59:19,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:59:19,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:59:19,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:59:19,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:59:19,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:19,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:19,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:19,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:19,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:19,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:19,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742250_1426 (size=127628) 2024-12-13T00:59:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40551 is added to blk_1073742250_1426 (size=127628) 2024-12-13T00:59:19,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742250_1426 (size=127628) 2024-12-13T00:59:19,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T00:59:19,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T00:59:19,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742251_1427 (size=2172137) 2024-12-13T00:59:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742252_1428 (size=213228) 2024-12-13T00:59:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742252_1428 (size=213228) 2024-12-13T00:59:19,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742252_1428 (size=213228) 2024-12-13T00:59:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T00:59:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T00:59:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742253_1429 (size=1877034) 2024-12-13T00:59:19,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742254_1430 (size=533455) 2024-12-13T00:59:19,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742254_1430 (size=533455) 2024-12-13T00:59:19,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742254_1430 (size=533455) 2024-12-13T00:59:19,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742255_1431 (size=7280644) 2024-12-13T00:59:19,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742255_1431 (size=7280644) 2024-12-13T00:59:19,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742255_1431 (size=7280644) 2024-12-13T00:59:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742256_1432 (size=4188619) 2024-12-13T00:59:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742256_1432 (size=4188619) 2024-12-13T00:59:19,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742256_1432 (size=4188619) 2024-12-13T00:59:19,227 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742257_1433 (size=20406) 2024-12-13T00:59:19,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742257_1433 (size=20406) 2024-12-13T00:59:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742257_1433 (size=20406) 2024-12-13T00:59:19,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742258_1434 (size=6350924) 2024-12-13T00:59:19,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742258_1434 (size=6350924) 2024-12-13T00:59:19,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742258_1434 (size=6350924) 2024-12-13T00:59:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742259_1435 (size=75495) 2024-12-13T00:59:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742259_1435 (size=75495) 2024-12-13T00:59:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742259_1435 (size=75495) 2024-12-13T00:59:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742260_1436 (size=45609) 2024-12-13T00:59:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742260_1436 (size=45609) 2024-12-13T00:59:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742260_1436 (size=45609) 2024-12-13T00:59:19,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742261_1437 (size=110084) 2024-12-13T00:59:19,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742261_1437 (size=110084) 2024-12-13T00:59:19,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742261_1437 (size=110084) 2024-12-13T00:59:19,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T00:59:19,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T00:59:19,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742262_1438 (size=1323991) 2024-12-13T00:59:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742263_1439 (size=23076) 2024-12-13T00:59:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742263_1439 (size=23076) 2024-12-13T00:59:19,296 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742263_1439 (size=23076) 2024-12-13T00:59:19,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742264_1440 (size=126803) 2024-12-13T00:59:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742264_1440 (size=126803) 2024-12-13T00:59:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742264_1440 (size=126803) 2024-12-13T00:59:19,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742265_1441 (size=322274) 2024-12-13T00:59:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742265_1441 (size=322274) 2024-12-13T00:59:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742265_1441 (size=322274) 2024-12-13T00:59:19,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T00:59:19,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T00:59:19,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742266_1442 (size=1832290) 2024-12-13T00:59:19,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742267_1443 (size=30081) 2024-12-13T00:59:19,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742267_1443 (size=30081) 2024-12-13T00:59:19,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742267_1443 (size=30081) 2024-12-13T00:59:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742268_1444 (size=53616) 2024-12-13T00:59:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742268_1444 (size=53616) 2024-12-13T00:59:19,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742268_1444 (size=53616) 2024-12-13T00:59:19,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742269_1445 (size=29229) 2024-12-13T00:59:19,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742269_1445 (size=29229) 2024-12-13T00:59:19,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742269_1445 (size=29229) 2024-12-13T00:59:19,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742270_1446 (size=169089) 2024-12-13T00:59:19,356 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742270_1446 (size=169089) 2024-12-13T00:59:19,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742270_1446 (size=169089) 2024-12-13T00:59:19,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742271_1447 (size=5175431) 2024-12-13T00:59:19,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742271_1447 (size=5175431) 2024-12-13T00:59:19,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742271_1447 (size=5175431) 2024-12-13T00:59:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742272_1448 (size=136454) 2024-12-13T00:59:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742272_1448 (size=136454) 2024-12-13T00:59:19,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742272_1448 (size=136454) 2024-12-13T00:59:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742273_1449 (size=451756) 2024-12-13T00:59:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742273_1449 (size=451756) 2024-12-13T00:59:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742273_1449 (size=451756) 2024-12-13T00:59:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742274_1450 (size=907855) 2024-12-13T00:59:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742274_1450 (size=907855) 2024-12-13T00:59:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742274_1450 (size=907855) 2024-12-13T00:59:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T00:59:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T00:59:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742275_1451 (size=3317408) 2024-12-13T00:59:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742276_1452 (size=503880) 2024-12-13T00:59:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742276_1452 (size=503880) 2024-12-13T00:59:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742276_1452 (size=503880) 2024-12-13T00:59:19,453 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T00:59:19,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T00:59:19,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742277_1453 (size=4695811) 2024-12-13T00:59:19,455 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T00:59:19,458 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-13T00:59:19,459 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:59:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742278_1454 (size=338) 2024-12-13T00:59:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742278_1454 (size=338) 2024-12-13T00:59:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742278_1454 (size=338) 2024-12-13T00:59:19,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742279_1455 (size=15) 2024-12-13T00:59:19,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742279_1455 (size=15) 2024-12-13T00:59:19,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742279_1455 (size=15) 2024-12-13T00:59:19,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742280_1456 (size=304931) 2024-12-13T00:59:19,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742280_1456 (size=304931) 2024-12-13T00:59:19,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742280_1456 (size=304931) 2024-12-13T00:59:20,303 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:59:20,303 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:59:20,307 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0007_000001 (auth:SIMPLE) from 127.0.0.1:46928 2024-12-13T00:59:20,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0007/container_1734051393214_0007_01_000001/launch_container.sh] 2024-12-13T00:59:20,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0007/container_1734051393214_0007_01_000001/container_tokens] 2024-12-13T00:59:20,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0007/container_1734051393214_0007_01_000001/sysfs] 2024-12-13T00:59:21,199 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:59:21,300 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:39242 2024-12-13T00:59:22,772 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
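[Editor's note] The FsDatasetAsyncDiskServiceFixer DEBUG line above reports a NoSuchFieldException for "threadGroup" and points to HBASE-27595, i.e. the test utility reflectively probes a DataNode-internal field that newer Hadoop releases no longer have. The following is only a minimal illustrative sketch of how such a reflective probe degrades into that log line; the class below is hypothetical and not HBase code — the log confirms only the field name and the exception type.

```java
import java.lang.reflect.Field;

public class ReflectiveFieldProbe {
    // Try to read a private field by name; tolerate its absence the way the
    // fixer is reported to (log and continue) instead of failing the test.
    static Object readPrivateField(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName);
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException e) {
            // On Hadoop versions where the field was removed, this branch is taken
            // and a DEBUG line like the one above is emitted.
            System.out.println("NoSuchFieldException: " + fieldName
                + "; field not present in this Hadoop version");
            return null;
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        // Probing an object for a field it does not have reproduces the code path.
        readPrivateField(new Object(), "threadGroup");
    }
}
```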
2024-12-13T00:59:24,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-13T00:59:24,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-13T00:59:24,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-13T00:59:26,964 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:50580 2024-12-13T00:59:27,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742281_1457 (size=350605) 2024-12-13T00:59:27,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742281_1457 (size=350605) 2024-12-13T00:59:27,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742281_1457 (size=350605) 2024-12-13T00:59:28,363 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1fe5c0fc12b23be0acd40e1738aa9284 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:59:28,363 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 779755e67e6754487b988f6d1c68ab8b changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:59:28,363 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 44a9236bb87948b810c0985660aebd89 changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:59:28,363 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 948ff07492d339d962636cab7d38112b changed from -1.0 to 0.0, refreshing cache 2024-12-13T00:59:29,292 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:36208 2024-12-13T00:59:30,033 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T00:59:33,546 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000002/launch_container.sh] 2024-12-13T00:59:33,546 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000002/container_tokens] 2024-12-13T00:59:33,546 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628/archive/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T00:59:35,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:42296 2024-12-13T00:59:37,668 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 44a9236bb87948b810c0985660aebd89, had cached 0 bytes from a total of 8460 2024-12-13T00:59:37,669 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe5c0fc12b23be0acd40e1738aa9284, had cached 0 bytes from a total of 5149 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628/archive/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T00:59:39,736 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000003/launch_container.sh] 2024-12-13T00:59:39,736 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000003/container_tokens] 2024-12-13T00:59:39,736 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000003/sysfs] 2024-12-13T00:59:41,093 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:51504 2024-12-13T00:59:45,052 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000004/launch_container.sh] 2024-12-13T00:59:45,052 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000004/container_tokens] 2024-12-13T00:59:45,052 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_3/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/local-export-1734051557628/archive/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-13T00:59:46,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:56738 2024-12-13T00:59:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742282_1458 (size=21340) 2024-12-13T00:59:49,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742282_1458 (size=21340) 2024-12-13T00:59:49,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742282_1458 (size=21340) 2024-12-13T00:59:49,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742283_1459 (size=460) 2024-12-13T00:59:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742283_1459 (size=460) 2024-12-13T00:59:49,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742283_1459 (size=460) 2024-12-13T00:59:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000005/launch_container.sh] 2024-12-13T00:59:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000005/container_tokens] 2024-12-13T00:59:49,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000005/sysfs] 2024-12-13T00:59:49,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742284_1460 (size=21340) 2024-12-13T00:59:49,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742284_1460 (size=21340) 2024-12-13T00:59:49,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742284_1460 (size=21340) 2024-12-13T00:59:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742285_1461 (size=350605) 2024-12-13T00:59:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742285_1461 (size=350605) 2024-12-13T00:59:49,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742285_1461 (size=350605) 2024-12-13T00:59:49,959 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:56754 2024-12-13T00:59:51,740 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734051393214_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
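[Editor's note] The three map-task failures and the final ExportSnapshotException above all stem from the same checksum mismatch: the export copies HFiles from HDFS to a local file: destination, and the two filesystems use incompatible checksum algorithms. The error text itself names the two remedies (file-level COMPOSITE_CRC checksums, or -no-checksum-verify). Below is a hedged sketch of re-running the export with those options via ToolRunner, which the stack trace shows is how the tool is driven (ToolRunner.run -> AbstractHBaseTool.run). The -copy-to path is illustrative, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithCompositeCrc {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            // File-level checksum comparison that works across filesystem types,
            // exactly as suggested by the error message above.
            "-Ddfs.checksum.combine.mode=COMPOSITE_CRC",
            "-snapshot", "snaptb0-testExportWithChecksum",
            // Local destination (placeholder path). Alternatively, pass
            // "-no-checksum-verify" to skip verification entirely, at the cost
            // of not detecting corruption introduced during the transfer.
            "-copy-to", "file:///tmp/local-export"
        });
        System.exit(rc);
    }
}
```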
2024-12-13T00:59:51,742 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742 2024-12-13T00:59:51,742 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:51,788 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T00:59:51,788 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T00:59:51,791 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T00:59:51,802 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-13T00:59:51,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742286_1462 (size=621) 2024-12-13T00:59:51,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742286_1462 (size=621) 2024-12-13T00:59:51,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742286_1462 (size=621) 2024-12-13T00:59:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742287_1463 (size=156) 2024-12-13T00:59:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742287_1463 (size=156) 2024-12-13T00:59:51,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742287_1463 (size=156) 2024-12-13T00:59:51,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:51,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:51,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:51,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:52,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T00:59:53,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-12650052901911265485.jar 2024-12-13T00:59:53,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-17151537786969404804.jar 2024-12-13T00:59:53,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,171 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T00:59:53,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T00:59:53,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T00:59:53,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T00:59:53,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T00:59:53,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T00:59:53,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T00:59:53,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T00:59:53,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T00:59:53,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T00:59:53,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T00:59:53,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T00:59:53,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T00:59:53,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:53,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:53,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:53,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:53,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T00:59:53,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:53,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T00:59:53,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742288_1464 (size=127628) 2024-12-13T00:59:53,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742288_1464 (size=127628) 2024-12-13T00:59:53,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742288_1464 (size=127628) 2024-12-13T00:59:53,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742289_1465 (size=2172137) 2024-12-13T00:59:53,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742289_1465 (size=2172137) 2024-12-13T00:59:53,286 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742289_1465 (size=2172137) 2024-12-13T00:59:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742290_1466 (size=213228) 2024-12-13T00:59:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742290_1466 (size=213228) 2024-12-13T00:59:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742290_1466 (size=213228) 2024-12-13T00:59:53,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742291_1467 (size=1877034) 2024-12-13T00:59:53,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742291_1467 (size=1877034) 2024-12-13T00:59:53,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742291_1467 (size=1877034) 2024-12-13T00:59:53,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742292_1468 (size=533455) 2024-12-13T00:59:53,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742292_1468 (size=533455) 2024-12-13T00:59:53,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742292_1468 (size=533455) 2024-12-13T00:59:53,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742293_1469 (size=7280644) 2024-12-13T00:59:53,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742293_1469 (size=7280644) 2024-12-13T00:59:53,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742293_1469 (size=7280644) 2024-12-13T00:59:53,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742294_1470 (size=4188619) 2024-12-13T00:59:53,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742294_1470 (size=4188619) 2024-12-13T00:59:53,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742294_1470 (size=4188619) 2024-12-13T00:59:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742295_1471 (size=6350924) 2024-12-13T00:59:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742295_1471 (size=6350924) 2024-12-13T00:59:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742295_1471 (size=6350924) 2024-12-13T00:59:53,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742296_1472 (size=20406) 2024-12-13T00:59:53,480 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742296_1472 (size=20406) 2024-12-13T00:59:53,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742296_1472 (size=20406) 2024-12-13T00:59:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742297_1473 (size=75495) 2024-12-13T00:59:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742297_1473 (size=75495) 2024-12-13T00:59:53,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742297_1473 (size=75495) 2024-12-13T00:59:53,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742298_1474 (size=45609) 2024-12-13T00:59:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742298_1474 (size=45609) 2024-12-13T00:59:53,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742298_1474 (size=45609) 2024-12-13T00:59:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742299_1475 (size=110084) 2024-12-13T00:59:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742299_1475 (size=110084) 2024-12-13T00:59:53,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742299_1475 (size=110084) 2024-12-13T00:59:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T00:59:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T00:59:53,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742300_1476 (size=1323991) 2024-12-13T00:59:53,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742301_1477 (size=23076) 2024-12-13T00:59:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742301_1477 (size=23076) 2024-12-13T00:59:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742301_1477 (size=23076) 2024-12-13T00:59:53,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742302_1478 (size=126803) 2024-12-13T00:59:53,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742302_1478 (size=126803) 2024-12-13T00:59:53,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742302_1478 (size=126803) 2024-12-13T00:59:53,546 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742303_1479 (size=322274) 2024-12-13T00:59:53,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742303_1479 (size=322274) 2024-12-13T00:59:53,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742303_1479 (size=322274) 2024-12-13T00:59:53,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T00:59:53,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T00:59:53,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742304_1480 (size=1832290) 2024-12-13T00:59:53,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742305_1481 (size=30081) 2024-12-13T00:59:53,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742305_1481 (size=30081) 2024-12-13T00:59:53,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742305_1481 (size=30081) 2024-12-13T00:59:53,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742306_1482 (size=53616) 2024-12-13T00:59:53,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742306_1482 (size=53616) 2024-12-13T00:59:53,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742306_1482 (size=53616) 2024-12-13T00:59:53,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742307_1483 (size=29229) 2024-12-13T00:59:53,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742307_1483 (size=29229) 2024-12-13T00:59:53,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742307_1483 (size=29229) 2024-12-13T00:59:53,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742308_1484 (size=169089) 2024-12-13T00:59:53,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742308_1484 (size=169089) 2024-12-13T00:59:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742308_1484 (size=169089) 2024-12-13T00:59:53,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742309_1485 (size=451756) 2024-12-13T00:59:53,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742309_1485 (size=451756) 2024-12-13T00:59:53,605 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742309_1485 (size=451756) 2024-12-13T00:59:53,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742310_1486 (size=5175431) 2024-12-13T00:59:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742310_1486 (size=5175431) 2024-12-13T00:59:53,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742310_1486 (size=5175431) 2024-12-13T00:59:53,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742311_1487 (size=136454) 2024-12-13T00:59:53,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742311_1487 (size=136454) 2024-12-13T00:59:53,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742311_1487 (size=136454) 2024-12-13T00:59:53,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742312_1488 (size=907855) 2024-12-13T00:59:53,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742312_1488 (size=907855) 2024-12-13T00:59:53,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742312_1488 (size=907855) 2024-12-13T00:59:53,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742313_1489 (size=3317408) 2024-12-13T00:59:53,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742313_1489 (size=3317408) 2024-12-13T00:59:53,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742313_1489 (size=3317408) 2024-12-13T00:59:53,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742314_1490 (size=503880) 2024-12-13T00:59:53,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742314_1490 (size=503880) 2024-12-13T00:59:53,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742314_1490 (size=503880) 2024-12-13T00:59:53,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T00:59:53,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T00:59:53,849 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
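[Editor's note] The JobResourceUploader warning above ("No job jar file set... See Job or Job#setJar(String)") means the submitted MapReduce job has no job jar attached, so user classes must be found on the shared classpath. In this test it is benign because TableMapReduceUtil ships the required jars via the distributed cache (the "For class X, using jar Y" DEBUG lines). For reference, a minimal sketch of the two standard ways to attach a job jar; the class and path below are placeholders, not from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
    public static Job configure(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "export-example");
        // Preferred: infer the jar from a class it contains.
        job.setJarByClass(JobJarExample.class);
        // Or set the jar path explicitly, e.g. when classes are not packaged yet:
        // job.setJar("/path/to/job-classes.jar");
        return job;
    }
}
```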
2024-12-13T00:59:53,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742315_1491 (size=4695811) 2024-12-13T00:59:53,852 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-13T00:59:53,854 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T00:59:53,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742316_1492 (size=338) 2024-12-13T00:59:53,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742316_1492 (size=338) 2024-12-13T00:59:53,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742316_1492 (size=338) 2024-12-13T00:59:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742317_1493 (size=15) 2024-12-13T00:59:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742317_1493 (size=15) 2024-12-13T00:59:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742317_1493 (size=15) 2024-12-13T00:59:53,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742318_1494 (size=304883) 2024-12-13T00:59:53,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742318_1494 (size=304883) 2024-12-13T00:59:53,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742318_1494 (size=304883) 2024-12-13T00:59:56,033 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T00:59:56,033 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T00:59:56,037 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0008_000001 (auth:SIMPLE) from 127.0.0.1:52462 2024-12-13T00:59:56,046 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000001/launch_container.sh] 2024-12-13T00:59:56,046 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000001/container_tokens] 2024-12-13T00:59:56,046 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_1/usercache/jenkins/appcache/application_1734051393214_0008/container_1734051393214_0008_01_000001/sysfs] 2024-12-13T00:59:56,908 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0009_000001 (auth:SIMPLE) from 127.0.0.1:55350 2024-12-13T01:00:01,363 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 948ff07492d339d962636cab7d38112b, had cached 0 bytes from a total of 8460 2024-12-13T01:00:01,364 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 779755e67e6754487b988f6d1c68ab8b, had cached 0 bytes from a total of 5149 2024-12-13T01:00:02,169 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0009_000001 (auth:SIMPLE) from 127.0.0.1:42666 2024-12-13T01:00:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742319_1495 (size=350557) 2024-12-13T01:00:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742319_1495 (size=350557) 2024-12-13T01:00:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742319_1495 (size=350557) 2024-12-13T01:00:04,483 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0009_000001 (auth:SIMPLE) from 127.0.0.1:42210 2024-12-13T01:00:09,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742320_1496 (size=8460) 2024-12-13T01:00:09,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42021 is added to blk_1073742320_1496 (size=8460) 2024-12-13T01:00:09,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742320_1496 (size=8460) 2024-12-13T01:00:09,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742321_1497 (size=5149) 2024-12-13T01:00:09,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742321_1497 (size=5149) 2024-12-13T01:00:09,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742321_1497 (size=5149) 2024-12-13T01:00:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742322_1498 (size=17413) 2024-12-13T01:00:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742322_1498 (size=17413) 2024-12-13T01:00:09,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742322_1498 (size=17413) 2024-12-13T01:00:09,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742323_1499 (size=462) 2024-12-13T01:00:09,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742323_1499 (size=462) 2024-12-13T01:00:09,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742323_1499 (size=462) 2024-12-13T01:00:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742324_1500 (size=17413) 2024-12-13T01:00:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742324_1500 (size=17413) 2024-12-13T01:00:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742324_1500 (size=17413) 2024-12-13T01:00:09,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742325_1501 (size=350557) 2024-12-13T01:00:09,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742325_1501 (size=350557) 2024-12-13T01:00:09,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742325_1501 (size=350557) 2024-12-13T01:00:09,285 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0009_000001 (auth:SIMPLE) from 127.0.0.1:42218 2024-12-13T01:00:11,156 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T01:00:11,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
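The entries from 00:59:53 onward show the ExportSnapshot tool loading the hfile list for 'snaptb0-testExportWithChecksum', planning a single split of about 13.3 K, and the resulting MapReduce job's output blocks being reported by the DataNodes until the copy finishes just after 01:00:09. For reference, a minimal sketch of driving the same tool from Java; the flag spelling follows the tool's documented command-line usage, and the destination URI is a placeholder, not a value from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name as it appears in the log; the destination URI is a placeholder.
    String[] toolArgs = {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "hdfs://dest-cluster:8020/hbase"
    };
    // ExportSnapshot runs a MapReduce job that copies the snapshot manifest and
    // hfiles to the target filesystem, verifying file checksums where supported.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), toolArgs);
    System.exit(rc);
  }
}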
2024-12-13T01:00:11,164 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-13T01:00:11,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T01:00:11,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T01:00:11,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T01:00:11,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-13T01:00:11,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-13T01:00:11,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-13T01:00:11,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-13T01:00:11,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051591742/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-13T01:00:11,172 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-13T01:00:11,172 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-13T01:00:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T01:00:11,176 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051611176"}]},"ts":"1734051611176"} 2024-12-13T01:00:11,177 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-13T01:00:11,179 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-13T01:00:11,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-13T01:00:11,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, UNASSIGN}] 2024-12-13T01:00:11,182 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, UNASSIGN 2024-12-13T01:00:11,183 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, UNASSIGN 2024-12-13T01:00:11,183 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=948ff07492d339d962636cab7d38112b, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:11,183 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=779755e67e6754487b988f6d1c68ab8b, regionState=CLOSING, regionLocation=9812beb1e4cd,40687,1734051385054 2024-12-13T01:00:11,184 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T01:00:11,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=193, state=RUNNABLE; CloseRegionProcedure 779755e67e6754487b988f6d1c68ab8b, server=9812beb1e4cd,40687,1734051385054}] 2024-12-13T01:00:11,185 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T01:00:11,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE; CloseRegionProcedure 948ff07492d339d962636cab7d38112b, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T01:00:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T01:00:11,336 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,40687,1734051385054 2024-12-13T01:00:11,337 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close 779755e67e6754487b988f6d1c68ab8b 2024-12-13T01:00:11,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T01:00:11,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing 779755e67e6754487b988f6d1c68ab8b, disabling compactions & flushes 2024-12-13T01:00:11,337 DEBUG [RSProcedureDispatcher-pool-2 
{}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:11,337 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T01:00:11,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T01:00:11,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. after waiting 0 ms 2024-12-13T01:00:11,337 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T01:00:11,337 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 948ff07492d339d962636cab7d38112b 2024-12-13T01:00:11,338 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T01:00:11,338 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 948ff07492d339d962636cab7d38112b, disabling compactions & flushes 2024-12-13T01:00:11,338 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T01:00:11,338 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T01:00:11,338 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. after waiting 0 ms 2024-12-13T01:00:11,338 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 
2024-12-13T01:00:11,342 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T01:00:11,342 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T01:00:11,343 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:00:11,343 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:00:11,343 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b. 2024-12-13T01:00:11,343 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b. 2024-12-13T01:00:11,343 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for 779755e67e6754487b988f6d1c68ab8b: 2024-12-13T01:00:11,343 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 948ff07492d339d962636cab7d38112b: 2024-12-13T01:00:11,345 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed 779755e67e6754487b988f6d1c68ab8b 2024-12-13T01:00:11,345 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=779755e67e6754487b988f6d1c68ab8b, regionState=CLOSED 2024-12-13T01:00:11,345 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 948ff07492d339d962636cab7d38112b 2024-12-13T01:00:11,346 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=948ff07492d339d962636cab7d38112b, regionState=CLOSED 2024-12-13T01:00:11,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=193 2024-12-13T01:00:11,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=193, state=SUCCESS; CloseRegionProcedure 779755e67e6754487b988f6d1c68ab8b, server=9812beb1e4cd,40687,1734051385054 in 162 msec 2024-12-13T01:00:11,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=779755e67e6754487b988f6d1c68ab8b, UNASSIGN in 167 msec 2024-12-13T01:00:11,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=194 2024-12-13T01:00:11,349 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=194, state=SUCCESS; CloseRegionProcedure 948ff07492d339d962636cab7d38112b, server=9812beb1e4cd,46075,1734051384875 in 162 msec 2024-12-13T01:00:11,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=192 2024-12-13T01:00:11,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=948ff07492d339d962636cab7d38112b, UNASSIGN in 168 msec 2024-12-13T01:00:11,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-13T01:00:11,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 171 msec 2024-12-13T01:00:11,353 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051611352"}]},"ts":"1734051611352"} 2024-12-13T01:00:11,354 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-13T01:00:11,356 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-13T01:00:11,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 183 msec 2024-12-13T01:00:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-13T01:00:11,477 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-13T01:00:11,477 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-13T01:00:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,479 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-13T01:00:11,480 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,481 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-13T01:00:11,483 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b 
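The block above records DisableTableProcedure pid=191 finishing in 183 msec (both regions unassigned and closed) and DeleteTableProcedure pid=197 being stored, followed by ACL cleanup and the HFileArchiver starting to archive the region directories. From a client, that whole sequence is normally just two blocking Admin calls; a minimal sketch, with connection settings left to the default configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // A table must be disabled before it can be deleted; each call blocks
      // until the corresponding master procedure completes.
      admin.disableTable(table);
      admin.deleteTable(table);
    }
  }
}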
2024-12-13T01:00:11,483 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b 2024-12-13T01:00:11,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T01:00:11,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T01:00:11,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T01:00:11,485 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-13T01:00:11,485 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/recovered.edits] 2024-12-13T01:00:11,486 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/recovered.edits] 2024-12-13T01:00:11,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
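The ZKWatcher lines above are each server's permission watcher reacting to the /hbase/acl/testtb-testExportWithChecksum znode being rewritten and then deleted as the table's ACL entry goes away. The mechanism underneath is an ordinary ZooKeeper watch; a generic sketch of that pattern, with the quorum string and znode path copied from the log and everything else purely illustrative:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base path as they appear in the log; session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49424", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) ->
        System.out.println(event.getType() + " on " + event.getPath());
    // Watches are one-shot: the caller must re-register after each event.
    // HBase's ZKPermissionWatcher does this internally to keep its ACL cache fresh.
    zk.getChildren("/hbase/acl", watcher);
    Thread.sleep(60_000);
    zk.close();
  }
}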
2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-13T01:00:11,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-13T01:00:11,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:11,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:11,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:11,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:11,490 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/cf/e5b9d1575bf249a19cbf62752425f91f 2024-12-13T01:00:11,491 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/cf/ce80e03c98134cf3b8fc326920a336ee 2024-12-13T01:00:11,494 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b/recovered.edits/9.seqid 2024-12-13T01:00:11,494 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b/recovered.edits/9.seqid 2024-12-13T01:00:11,494 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/779755e67e6754487b988f6d1c68ab8b 2024-12-13T01:00:11,494 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportWithChecksum/948ff07492d339d962636cab7d38112b 2024-12-13T01:00:11,494 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-13T01:00:11,496 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,498 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-13T01:00:11,500 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-13T01:00:11,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
2024-12-13T01:00:11,501 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051611501"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:11,501 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051611501"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:11,503 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T01:00:11,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 779755e67e6754487b988f6d1c68ab8b, NAME => 'testtb-testExportWithChecksum,,1734051555982.779755e67e6754487b988f6d1c68ab8b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 948ff07492d339d962636cab7d38112b, NAME => 'testtb-testExportWithChecksum,1,1734051555982.948ff07492d339d962636cab7d38112b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T01:00:11,503 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-13T01:00:11,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051611503"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:11,505 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-13T01:00:11,507 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-13T01:00:11,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 30 msec 2024-12-13T01:00:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-13T01:00:11,590 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-13T01:00:11,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-13T01:00:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-13T01:00:11,598 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-13T01:00:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-13T01:00:11,623 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=801 (was 806), OpenFileDescriptor=776 (was 791), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=623 (was 548) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=3201 (was 3743) 2024-12-13T01:00:11,623 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-13T01:00:11,644 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=801, OpenFileDescriptor=776, MaxFileDescriptor=1048576, SystemLoadAverage=623, ProcessCount=20, AvailableMemoryMB=3199 2024-12-13T01:00:11,644 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-12-13T01:00:11,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T01:00:11,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:11,647 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T01:00:11,647 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T01:00:11,647 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-13T01:00:11,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T01:00:11,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T01:00:11,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742326_1502 (size=418) 2024-12-13T01:00:11,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742326_1502 (size=418) 2024-12-13T01:00:11,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742326_1502 (size=418) 2024-12-13T01:00:11,664 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 65374f643fb794f30eb30365b4de9b9e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T01:00:11,668 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 783cef47d7d0c4fe8c3db10c8a3aab92, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T01:00:11,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742327_1503 (size=79) 2024-12-13T01:00:11,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742327_1503 (size=79) 2024-12-13T01:00:11,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742327_1503 (size=79) 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 65374f643fb794f30eb30365b4de9b9e, disabling compactions & flushes 2024-12-13T01:00:11,685 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. after waiting 0 ms 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 
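The CreateTableProcedure above lays out 'testtb-testExportFileSystemStateWithSkipTmp' on disk with a single 'cf' family and two regions split at row key '1', using the descriptor printed in the log (VERSIONS => '1', BLOCKSIZE => '65536', other attributes at defaults). The equivalent client-side call, as a minimal sketch; only the table name, family name, split key and those two attributes are taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)          // VERSIONS => '1' in the logged descriptor
              .setBlocksize(64 * 1024)    // BLOCKSIZE => '65536'
              .build());
      // Two regions split at row key "1", matching STARTKEY/ENDKEY in the log.
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}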
2024-12-13T01:00:11,685 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:11,685 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 65374f643fb794f30eb30365b4de9b9e: 2024-12-13T01:00:11,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742328_1504 (size=79) 2024-12-13T01:00:11,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742328_1504 (size=79) 2024-12-13T01:00:11,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742328_1504 (size=79) 2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 783cef47d7d0c4fe8c3db10c8a3aab92, disabling compactions & flushes 2024-12-13T01:00:11,696 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. after waiting 0 ms 2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:11,696 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
2024-12-13T01:00:11,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 783cef47d7d0c4fe8c3db10c8a3aab92: 2024-12-13T01:00:11,697 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T01:00:11,698 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734051611697"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051611697"}]},"ts":"1734051611697"} 2024-12-13T01:00:11,698 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734051611697"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734051611697"}]},"ts":"1734051611697"} 2024-12-13T01:00:11,700 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-13T01:00:11,701 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T01:00:11,701 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051611701"}]},"ts":"1734051611701"} 2024-12-13T01:00:11,702 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-13T01:00:11,706 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {9812beb1e4cd=0} racks are {/default-rack=0} 2024-12-13T01:00:11,707 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-13T01:00:11,707 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-13T01:00:11,707 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-13T01:00:11,707 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-13T01:00:11,707 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-13T01:00:11,707 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-13T01:00:11,707 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-13T01:00:11,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, ASSIGN}] 2024-12-13T01:00:11,708 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, 
ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, ASSIGN 2024-12-13T01:00:11,708 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, ASSIGN 2024-12-13T01:00:11,709 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, ASSIGN; state=OFFLINE, location=9812beb1e4cd,45231,1734051384968; forceNewPlan=false, retain=false 2024-12-13T01:00:11,709 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, ASSIGN; state=OFFLINE, location=9812beb1e4cd,46075,1734051384875; forceNewPlan=false, retain=false 2024-12-13T01:00:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T01:00:11,859 INFO [9812beb1e4cd:38293 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-13T01:00:11,859 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=783cef47d7d0c4fe8c3db10c8a3aab92, regionState=OPENING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:11,859 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=65374f643fb794f30eb30365b4de9b9e, regionState=OPENING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:11,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T01:00:11,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure 65374f643fb794f30eb30365b4de9b9e, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T01:00:11,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T01:00:12,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:12,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:12,018 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:12,019 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
2024-12-13T01:00:12,019 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 783cef47d7d0c4fe8c3db10c8a3aab92, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.', STARTKEY => '1', ENDKEY => ''} 2024-12-13T01:00:12,019 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 65374f643fb794f30eb30365b4de9b9e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.', STARTKEY => '', ENDKEY => '1'} 2024-12-13T01:00:12,019 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. service=AccessControlService 2024-12-13T01:00:12,019 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. service=AccessControlService 2024-12-13T01:00:12,020 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-13T01:00:12,020 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
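The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines come from cluster-wide coprocessor configuration rather than anything set on this particular table. A sketch of the usual properties involved; the property names are the standard ones, but treat the exact combination a given secure test cluster uses as an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Load the AccessController on the master, region servers and every region,
    // which is what produces the "System coprocessor ... loaded" lines above.
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    System.out.println(conf.get("hbase.coprocessor.region.classes"));
  }
}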
2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,020 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,024 INFO [StoreOpener-783cef47d7d0c4fe8c3db10c8a3aab92-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,024 INFO [StoreOpener-65374f643fb794f30eb30365b4de9b9e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,026 INFO [StoreOpener-65374f643fb794f30eb30365b4de9b9e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 65374f643fb794f30eb30365b4de9b9e columnFamilyName cf 2024-12-13T01:00:12,026 DEBUG [StoreOpener-65374f643fb794f30eb30365b4de9b9e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T01:00:12,026 INFO [StoreOpener-783cef47d7d0c4fe8c3db10c8a3aab92-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 783cef47d7d0c4fe8c3db10c8a3aab92 columnFamilyName cf 2024-12-13T01:00:12,026 DEBUG [StoreOpener-783cef47d7d0c4fe8c3db10c8a3aab92-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T01:00:12,027 INFO [StoreOpener-783cef47d7d0c4fe8c3db10c8a3aab92-1 {}] regionserver.HStore(327): Store=783cef47d7d0c4fe8c3db10c8a3aab92/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T01:00:12,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,028 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,030 INFO [StoreOpener-65374f643fb794f30eb30365b4de9b9e-1 {}] regionserver.HStore(327): Store=65374f643fb794f30eb30365b4de9b9e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T01:00:12,030 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,031 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,031 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,035 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,035 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T01:00:12,035 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 783cef47d7d0c4fe8c3db10c8a3aab92; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59779543, jitterRate=-0.1092153936624527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T01:00:12,036 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 783cef47d7d0c4fe8c3db10c8a3aab92: 2024-12-13T01:00:12,038 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92., pid=201, masterSystemTime=1734051612013 2024-12-13T01:00:12,039 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T01:00:12,040 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 65374f643fb794f30eb30365b4de9b9e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70934888, jitterRate=0.057012200355529785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T01:00:12,040 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:12,040 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 65374f643fb794f30eb30365b4de9b9e: 2024-12-13T01:00:12,040 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
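The CompactionConfiguration entries above report minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0 and a major period of 604800000 ms for column family cf. For reference, here is a minimal sketch that sets the same values through the usual hbase-site.xml property names; the key names are an assumption based on standard HBase configuration, not something printed by this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionDefaultsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values matching the CompactionConfiguration line logged above
        // (property names are the standard keys; an assumption, not read from this log).
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // compaction selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);  // major period, 7 days
        System.out.println("min files to compact: " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}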
2024-12-13T01:00:12,041 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=783cef47d7d0c4fe8c3db10c8a3aab92, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:12,042 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e., pid=202, masterSystemTime=1734051612015 2024-12-13T01:00:12,045 DEBUG [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:12,045 INFO [RS_OPEN_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:12,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-13T01:00:12,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92, server=9812beb1e4cd,45231,1734051384968 in 182 msec 2024-12-13T01:00:12,045 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=65374f643fb794f30eb30365b4de9b9e, regionState=OPEN, openSeqNum=2, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:12,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, ASSIGN in 338 msec 2024-12-13T01:00:12,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-13T01:00:12,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure 65374f643fb794f30eb30365b4de9b9e, server=9812beb1e4cd,46075,1734051384875 in 185 msec 2024-12-13T01:00:12,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-13T01:00:12,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, ASSIGN in 341 msec 2024-12-13T01:00:12,051 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T01:00:12,052 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051612051"}]},"ts":"1734051612051"} 2024-12-13T01:00:12,053 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-13T01:00:12,056 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T01:00:12,056 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-13T01:00:12,059 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-13T01:00:12,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:12,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:12,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:12,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:12,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp 
\x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:12,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 424 msec 2024-12-13T01:00:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-13T01:00:12,251 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-13T01:00:12,251 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-13T01:00:12,251 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T01:00:12,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40687 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-13T01:00:12,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-13T01:00:12,260 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T01:00:12,260 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-13T01:00:12,263 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T01:00:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051612263 (current time:1734051612263). 
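The PermissionStorage and ZKPermissionWatcher entries above record the table-level grant "jenkins: RWXCA" being written and then fanned out to every process watching /hbase/acl, each of which updates its local permission cache. A minimal client-side sketch of issuing such a grant, assuming the AccessControlClient helper and an otherwise default connection (neither appears in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
    public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            // Table-wide grant (family/qualifier left null) of R, W, X, C, A to user "jenkins",
            // mirroring the "jenkins: RWXCA" acl entry logged above.
            AccessControlClient.grant(conn,
                TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
                "jenkins", null, null,
                Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                Permission.Action.CREATE, Permission.Action.ADMIN);
        }
    }
}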
2024-12-13T01:00:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T01:00:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-13T01:00:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T01:00:12,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12e8d9d3 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d38e1c2 2024-12-13T01:00:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a68db7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T01:00:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T01:00:12,277 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T01:00:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12e8d9d3 to 127.0.0.1:49424 2024-12-13T01:00:12,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:00:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0880fbca to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@625c3de9 2024-12-13T01:00:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24d1df20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T01:00:12,292 DEBUG [hconnection-0x6cd5b05b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T01:00:12,294 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T01:00:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0880fbca to 127.0.0.1:49424 2024-12-13T01:00:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:00:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: 
RWXCA] 2024-12-13T01:00:12,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T01:00:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T01:00:12,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-13T01:00:12,301 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T01:00:12,302 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T01:00:12,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T01:00:12,305 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T01:00:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742329_1505 (size=203) 2024-12-13T01:00:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742329_1505 (size=203) 2024-12-13T01:00:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742329_1505 (size=203) 2024-12-13T01:00:12,328 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T01:00:12,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92}] 2024-12-13T01:00:12,330 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 
783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,330 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T01:00:12,481 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:12,481 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:12,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-13T01:00:12,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-13T01:00:12,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:12,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 65374f643fb794f30eb30365b4de9b9e: 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 783cef47d7d0c4fe8c3db10c8a3aab92: 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T01:00:12,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-13T01:00:12,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742330_1506 (size=82) 2024-12-13T01:00:12,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742330_1506 (size=82) 2024-12-13T01:00:12,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742330_1506 (size=82) 2024-12-13T01:00:12,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:12,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-13T01:00:12,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-13T01:00:12,503 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,503 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e in 180 msec 2024-12-13T01:00:12,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742331_1507 (size=82) 2024-12-13T01:00:12,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742331_1507 (size=82) 2024-12-13T01:00:12,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742331_1507 (size=82) 2024-12-13T01:00:12,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
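The emptySnaptb0-testExportFileSystemStateWithSkipTmp snapshot above was requested with type=FLUSH while the table stayed online, and the master drove it through the SnapshotProcedure states shown. A minimal sketch of the client call that starts such a snapshot, with the connection setup assumed rather than taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // FLUSH-type snapshot of an online table; the master then runs the
            // SnapshotProcedure / SnapshotRegionProcedure chain seen in the log.
            admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
                TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
                SnapshotType.FLUSH);
        }
    }
}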
2024-12-13T01:00:12,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-13T01:00:12,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-13T01:00:12,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,514 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=203 2024-12-13T01:00:12,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92 in 185 msec 2024-12-13T01:00:12,516 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T01:00:12,517 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T01:00:12,517 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T01:00:12,517 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,518 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742332_1508 (size=585) 2024-12-13T01:00:12,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742332_1508 (size=585) 2024-12-13T01:00:12,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742332_1508 (size=585) 2024-12-13T01:00:12,554 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T01:00:12,560 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T01:00:12,560 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,562 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T01:00:12,562 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-13T01:00:12,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 263 msec 2024-12-13T01:00:12,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-13T01:00:12,606 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-13T01:00:12,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46075 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T01:00:12,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45231 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. with WAL disabled. Data may be lost in the event of a crash. 2024-12-13T01:00:12,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 
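The two "writing data to region ... with WAL disabled" warnings above are what a region server logs when mutations arrive with write-ahead-log durability turned off. A minimal sketch of a put issued that way; the row key, qualifier and value are made up for illustration, only the durability setting matters:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
            Put put = new Put(Bytes.toBytes("row-0"));   // hypothetical row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            put.setDurability(Durability.SKIP_WAL);      // skipping the WAL is what triggers the warning
            table.put(put);
        }
    }
}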
2024-12-13T01:00:12,631 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T01:00:12,647 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T01:00:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734051612647 (current time:1734051612647). 2024-12-13T01:00:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-13T01:00:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-13T01:00:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-13T01:00:12,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4a3669b5 to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b204fc0 2024-12-13T01:00:12,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aa0475c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T01:00:12,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T01:00:12,663 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T01:00:12,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4a3669b5 to 127.0.0.1:49424 2024-12-13T01:00:12,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:00:12,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77c0310a to 127.0.0.1:49424 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e172e7d 2024-12-13T01:00:12,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36c50c44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T01:00:12,681 DEBUG [hconnection-0x2c483780-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T01:00:12,682 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.2:33248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T01:00:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77c0310a to 127.0.0.1:49424 2024-12-13T01:00:12,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:00:12,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-13T01:00:12,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-13T01:00:12,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-13T01:00:12,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-13T01:00:12,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T01:00:12,688 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-13T01:00:12,689 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-13T01:00:12,692 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-13T01:00:12,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742333_1509 (size=198) 2024-12-13T01:00:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742333_1509 (size=198) 2024-12-13T01:00:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742333_1509 (size=198) 2024-12-13T01:00:12,702 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-13T01:00:12,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92}] 2024-12-13T01:00:12,703 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,703 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T01:00:12,854 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:12,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45231 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-13T01:00:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:12,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:12,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46075 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-13T01:00:12,855 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 783cef47d7d0c4fe8c3db10c8a3aab92 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-13T01:00:12,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 
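For the snaptb0 snapshot the regions are no longer empty, so the SnapshotRegionCallable first flushes each region's memstore ("Flushing ... 1/1 column families" just below). The same flush can also be requested directly by a client; a minimal sketch, assuming nothing beyond a default connection:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Flush every region of the table, writing memstore contents to new HFiles under <region>/cf/.
            admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
    }
}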
2024-12-13T01:00:12,855 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 65374f643fb794f30eb30365b4de9b9e 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-13T01:00:12,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/.tmp/cf/6ae24dac28fa4ed18e9ab828f1d2028d is 71, key is 15035942adcb86b55e126c6adc21c839/cf:q/1734051612622/Put/seqid=0 2024-12-13T01:00:12,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/.tmp/cf/97473eeb419f4d1bace38086d08cdb2e is 71, key is 07c4e0ff1f36f65043c08114dde641f0/cf:q/1734051612619/Put/seqid=0 2024-12-13T01:00:12,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742334_1510 (size=8256) 2024-12-13T01:00:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742334_1510 (size=8256) 2024-12-13T01:00:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742334_1510 (size=8256) 2024-12-13T01:00:12,890 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/.tmp/cf/6ae24dac28fa4ed18e9ab828f1d2028d 2024-12-13T01:00:12,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/.tmp/cf/6ae24dac28fa4ed18e9ab828f1d2028d as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d 2024-12-13T01:00:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742335_1511 (size=5356) 2024-12-13T01:00:12,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742335_1511 (size=5356) 2024-12-13T01:00:12,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742335_1511 (size=5356) 2024-12-13T01:00:12,897 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/.tmp/cf/97473eeb419f4d1bace38086d08cdb2e 2024-12-13T01:00:12,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d, entries=46, sequenceid=6, filesize=8.1 K 2024-12-13T01:00:12,900 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 783cef47d7d0c4fe8c3db10c8a3aab92 in 45ms, sequenceid=6, compaction requested=false 2024-12-13T01:00:12,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 783cef47d7d0c4fe8c3db10c8a3aab92: 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d] hfiles 2024-12-13T01:00:12,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/.tmp/cf/97473eeb419f4d1bace38086d08cdb2e as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e 2024-12-13T01:00:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742336_1512 (size=121) 2024-12-13T01:00:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742336_1512 (size=121) 2024-12-13T01:00:12,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742336_1512 (size=121) 2024-12-13T01:00:12,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
2024-12-13T01:00:12,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-13T01:00:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-13T01:00:12,914 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,914 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:12,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92 in 212 msec 2024-12-13T01:00:12,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e, entries=4, sequenceid=6, filesize=5.2 K 2024-12-13T01:00:12,916 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 65374f643fb794f30eb30365b4de9b9e in 61ms, sequenceid=6, compaction requested=false 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 65374f643fb794f30eb30365b4de9b9e: 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e] hfiles 2024-12-13T01:00:12,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742337_1513 (size=121) 2024-12-13T01:00:12,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742337_1513 (size=121) 2024-12-13T01:00:12,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742337_1513 (size=121) 2024-12-13T01:00:12,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 
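Once both SnapshotRegionProcedure children have reported back and the manifest is consolidated a few lines below, the finished snapshots become visible to clients. A minimal sketch that lists them, again assuming only a default connection:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Both snapshots taken above should appear once their procedures reach SUCCESS.
            for (SnapshotDescription sd : admin.listSnapshots()) {
                System.out.println(sd.getName());
            }
        }
    }
}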
2024-12-13T01:00:12,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9812beb1e4cd:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-13T01:00:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-13T01:00:12,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,924 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:12,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-13T01:00:12,925 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-13T01:00:12,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 65374f643fb794f30eb30365b4de9b9e in 222 msec 2024-12-13T01:00:12,926 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-13T01:00:12,926 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-13T01:00:12,926 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,927 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742338_1514 (size=663) 2024-12-13T01:00:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742338_1514 (size=663) 2024-12-13T01:00:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742338_1514 (size=663) 2024-12-13T01:00:12,945 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-13T01:00:12,950 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-13T01:00:12,950 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:12,951 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-13T01:00:12,951 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-13T01:00:12,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 265 msec 2024-12-13T01:00:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-13T01:00:12,990 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-13T01:00:12,990 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990 2024-12-13T01:00:12,990 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46775, tgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990, rawTgtDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990, srcFsUri=hdfs://localhost:46775, srcDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T01:00:13,036 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46775, inputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee 2024-12-13T01:00:13,036 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:13,038 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-13T01:00:13,043 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:13,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742340_1516 (size=663) 2024-12-13T01:00:13,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742340_1516 (size=663) 2024-12-13T01:00:13,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742340_1516 (size=663) 2024-12-13T01:00:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742339_1515 (size=198) 2024-12-13T01:00:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742339_1515 (size=198) 2024-12-13T01:00:13,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742339_1515 (size=198) 2024-12-13T01:00:13,098 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:13,098 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:13,099 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:13,099 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-3450255964402113997.jar 2024-12-13T01:00:14,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 
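[Illustrative sketch, not part of the captured log or the test source: one way to drive the same export that ExportSnapshot(1082-1083) logs above, assuming ExportSnapshot can be run as a Hadoop Tool via ToolRunner and that the configuration key `snapshot.export.skip.tmp` is what produces the `skipTmp=true` seen in the trace. Snapshot name and destination mirror the logged values; adjust for a real cluster.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSkipTmpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: this key corresponds to the "skipTmp=true" flag printed by ExportSnapshot above.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        // Destination mirrors the export-test path printed in the log.
        "-copy-to", "hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}
```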
2024-12-13T01:00:14,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop-1146105577866359932.jar 2024-12-13T01:00:14,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-13T01:00:14,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-13T01:00:14,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-13T01:00:14,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-13T01:00:14,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-13T01:00:14,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-13T01:00:14,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-13T01:00:14,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-13T01:00:14,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-13T01:00:14,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-13T01:00:14,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-13T01:00:14,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-13T01:00:14,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-13T01:00:14,363 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000002/launch_container.sh] 2024-12-13T01:00:14,363 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000002/container_tokens] 2024-12-13T01:00:14,363 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_0/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000002/sysfs] 2024-12-13T01:00:14,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T01:00:14,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T01:00:14,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T01:00:14,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T01:00:14,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-13T01:00:14,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T01:00:14,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-13T01:00:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742341_1517 (size=127628) 2024-12-13T01:00:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742341_1517 (size=127628) 2024-12-13T01:00:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742341_1517 (size=127628) 
2024-12-13T01:00:14,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742342_1518 (size=2172137) 2024-12-13T01:00:14,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742342_1518 (size=2172137) 2024-12-13T01:00:14,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742342_1518 (size=2172137) 2024-12-13T01:00:14,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742343_1519 (size=213228) 2024-12-13T01:00:14,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742343_1519 (size=213228) 2024-12-13T01:00:14,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742343_1519 (size=213228) 2024-12-13T01:00:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742344_1520 (size=1877034) 2024-12-13T01:00:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742344_1520 (size=1877034) 2024-12-13T01:00:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742344_1520 (size=1877034) 2024-12-13T01:00:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742345_1521 (size=533455) 2024-12-13T01:00:14,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742345_1521 (size=533455) 2024-12-13T01:00:14,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742345_1521 (size=533455) 2024-12-13T01:00:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742346_1522 (size=7280644) 2024-12-13T01:00:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742346_1522 (size=7280644) 2024-12-13T01:00:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742346_1522 (size=7280644) 2024-12-13T01:00:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742347_1523 (size=4188619) 2024-12-13T01:00:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742347_1523 (size=4188619) 2024-12-13T01:00:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742347_1523 (size=4188619) 2024-12-13T01:00:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:14,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): 
Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-13T01:00:14,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-13T01:00:14,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742348_1524 (size=20406) 2024-12-13T01:00:14,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742348_1524 (size=20406) 2024-12-13T01:00:14,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742348_1524 (size=20406) 2024-12-13T01:00:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742349_1525 (size=75495) 2024-12-13T01:00:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742349_1525 (size=75495) 2024-12-13T01:00:14,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742349_1525 (size=75495) 2024-12-13T01:00:14,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742350_1526 (size=45609) 2024-12-13T01:00:14,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742350_1526 (size=45609) 2024-12-13T01:00:14,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742350_1526 (size=45609) 2024-12-13T01:00:14,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742351_1527 (size=110084) 2024-12-13T01:00:14,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742351_1527 (size=110084) 2024-12-13T01:00:14,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742351_1527 (size=110084) 2024-12-13T01:00:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742352_1528 (size=451756) 2024-12-13T01:00:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742352_1528 (size=451756) 2024-12-13T01:00:14,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742352_1528 (size=451756) 2024-12-13T01:00:14,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742353_1529 (size=1323991) 2024-12-13T01:00:14,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742353_1529 (size=1323991) 2024-12-13T01:00:14,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to 
blk_1073742353_1529 (size=1323991) 2024-12-13T01:00:14,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742354_1530 (size=23076) 2024-12-13T01:00:14,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742354_1530 (size=23076) 2024-12-13T01:00:14,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742354_1530 (size=23076) 2024-12-13T01:00:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742355_1531 (size=126803) 2024-12-13T01:00:14,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742355_1531 (size=126803) 2024-12-13T01:00:14,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742355_1531 (size=126803) 2024-12-13T01:00:14,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742356_1532 (size=322274) 2024-12-13T01:00:14,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742356_1532 (size=322274) 2024-12-13T01:00:14,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742356_1532 (size=322274) 2024-12-13T01:00:14,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742357_1533 (size=6350924) 2024-12-13T01:00:14,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742357_1533 (size=6350924) 2024-12-13T01:00:14,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742357_1533 (size=6350924) 2024-12-13T01:00:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742358_1534 (size=1832290) 2024-12-13T01:00:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742358_1534 (size=1832290) 2024-12-13T01:00:14,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742358_1534 (size=1832290) 2024-12-13T01:00:14,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742359_1535 (size=30081) 2024-12-13T01:00:14,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742359_1535 (size=30081) 2024-12-13T01:00:14,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742359_1535 (size=30081) 2024-12-13T01:00:15,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742360_1536 (size=53616) 2024-12-13T01:00:15,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is 
added to blk_1073742360_1536 (size=53616) 2024-12-13T01:00:15,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742360_1536 (size=53616) 2024-12-13T01:00:15,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742361_1537 (size=29229) 2024-12-13T01:00:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742361_1537 (size=29229) 2024-12-13T01:00:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742361_1537 (size=29229) 2024-12-13T01:00:15,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742362_1538 (size=169089) 2024-12-13T01:00:15,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742362_1538 (size=169089) 2024-12-13T01:00:15,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742362_1538 (size=169089) 2024-12-13T01:00:15,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T01:00:15,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T01:00:15,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742363_1539 (size=5175431) 2024-12-13T01:00:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742364_1540 (size=136454) 2024-12-13T01:00:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742364_1540 (size=136454) 2024-12-13T01:00:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742364_1540 (size=136454) 2024-12-13T01:00:15,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742365_1541 (size=907855) 2024-12-13T01:00:15,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742365_1541 (size=907855) 2024-12-13T01:00:15,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742365_1541 (size=907855) 2024-12-13T01:00:15,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T01:00:15,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T01:00:15,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742366_1542 (size=3317408) 2024-12-13T01:00:15,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39849 is added to blk_1073742367_1543 (size=503880) 2024-12-13T01:00:15,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742367_1543 (size=503880) 2024-12-13T01:00:15,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742367_1543 (size=503880) 2024-12-13T01:00:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T01:00:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T01:00:15,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742368_1544 (size=4695811) 2024-12-13T01:00:15,233 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-13T01:00:15,234 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-13T01:00:15,236 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-13T01:00:15,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742369_1545 (size=366) 2024-12-13T01:00:15,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742369_1545 (size=366) 2024-12-13T01:00:15,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742369_1545 (size=366) 2024-12-13T01:00:15,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742370_1546 (size=15) 2024-12-13T01:00:15,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742370_1546 (size=15) 2024-12-13T01:00:15,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742370_1546 (size=15) 2024-12-13T01:00:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742371_1547 (size=305053) 2024-12-13T01:00:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742371_1547 (size=305053) 2024-12-13T01:00:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742371_1547 (size=305053) 2024-12-13T01:00:15,358 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-13T01:00:15,358 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-13T01:00:15,362 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0009_000001 (auth:SIMPLE) from 127.0.0.1:35926 2024-12-13T01:00:15,379 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000001/launch_container.sh] 2024-12-13T01:00:15,379 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000001/container_tokens] 2024-12-13T01:00:15,379 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_1/usercache/jenkins/appcache/application_1734051393214_0009/container_1734051393214_0009_01_000001/sysfs] 2024-12-13T01:00:16,048 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0010_000001 (auth:SIMPLE) from 127.0.0.1:56316 2024-12-13T01:00:16,871 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T01:00:21,238 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0010_000001 (auth:SIMPLE) from 127.0.0.1:47554 2024-12-13T01:00:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742372_1548 (size=350751) 2024-12-13T01:00:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742372_1548 (size=350751) 2024-12-13T01:00:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742372_1548 (size=350751) 2024-12-13T01:00:22,669 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 44a9236bb87948b810c0985660aebd89, had cached 0 bytes from a total of 8460 2024-12-13T01:00:22,669 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe5c0fc12b23be0acd40e1738aa9284, had cached 0 bytes from a total of 5149 2024-12-13T01:00:22,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T01:00:23,528 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0010_000001 (auth:SIMPLE) from 127.0.0.1:36360 2024-12-13T01:00:28,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742373_1549 (size=8256) 2024-12-13T01:00:28,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742373_1549 (size=8256) 2024-12-13T01:00:28,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742373_1549 (size=8256) 2024-12-13T01:00:28,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742374_1550 (size=5356) 2024-12-13T01:00:28,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742374_1550 (size=5356) 2024-12-13T01:00:28,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742374_1550 (size=5356) 2024-12-13T01:00:28,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742375_1551 (size=17455) 2024-12-13T01:00:28,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742375_1551 (size=17455) 2024-12-13T01:00:28,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742375_1551 (size=17455) 2024-12-13T01:00:28,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742376_1552 (size=476) 2024-12-13T01:00:28,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742376_1552 (size=476) 2024-12-13T01:00:28,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742376_1552 (size=476) 2024-12-13T01:00:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742377_1553 (size=17455) 2024-12-13T01:00:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742377_1553 (size=17455) 2024-12-13T01:00:28,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742377_1553 (size=17455) 2024-12-13T01:00:28,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742378_1554 (size=350751) 2024-12-13T01:00:28,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742378_1554 (size=350751) 2024-12-13T01:00:28,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742378_1554 (size=350751) 2024-12-13T01:00:28,524 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0010_000001 (auth:SIMPLE) from 127.0.0.1:36370 
2024-12-13T01:00:29,984 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 783cef47d7d0c4fe8c3db10c8a3aab92 changed from -1.0 to 0.0, refreshing cache 2024-12-13T01:00:29,984 DEBUG [master/9812beb1e4cd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 65374f643fb794f30eb30365b4de9b9e changed from -1.0 to 0.0, refreshing cache 2024-12-13T01:00:30,517 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-13T01:00:30,517 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-13T01:00:30,522 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,523 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-13T01:00:30,523 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-13T01:00:30,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-13T01:00:30,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-13T01:00:30,523 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1349531497_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,524 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-13T01:00:30,524 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/export-test/export-1734051612990/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-13T01:00:30,529 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T01:00:30,531 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051630531"}]},"ts":"1734051630531"} 2024-12-13T01:00:30,532 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-13T01:00:30,534 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-13T01:00:30,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-13T01:00:30,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, UNASSIGN}] 2024-12-13T01:00:30,537 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, UNASSIGN 2024-12-13T01:00:30,537 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, UNASSIGN 2024-12-13T01:00:30,537 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=783cef47d7d0c4fe8c3db10c8a3aab92, regionState=CLOSING, regionLocation=9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:30,537 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=65374f643fb794f30eb30365b4de9b9e, regionState=CLOSING, regionLocation=9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:30,538 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T01:00:30,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE; CloseRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92, server=9812beb1e4cd,45231,1734051384968}] 2024-12-13T01:00:30,538 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-13T01:00:30,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=211, state=RUNNABLE; CloseRegionProcedure 65374f643fb794f30eb30365b4de9b9e, server=9812beb1e4cd,46075,1734051384875}] 2024-12-13T01:00:30,631 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T01:00:30,689 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,45231,1734051384968 2024-12-13T01:00:30,690 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:30,690 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T01:00:30,690 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 783cef47d7d0c4fe8c3db10c8a3aab92, disabling compactions & flushes 2024-12-13T01:00:30,690 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:30,690 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:30,690 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. after waiting 0 ms 2024-12-13T01:00:30,690 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 2024-12-13T01:00:30,690 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 9812beb1e4cd,46075,1734051384875 2024-12-13T01:00:30,691 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:30,691 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-13T01:00:30,691 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing 65374f643fb794f30eb30365b4de9b9e, disabling compactions & flushes 2024-12-13T01:00:30,691 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:30,691 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 
2024-12-13T01:00:30,691 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. after waiting 0 ms 2024-12-13T01:00:30,691 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:30,694 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T01:00:30,694 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T01:00:30,694 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:00:30,695 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e. 2024-12-13T01:00:30,695 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:00:30,695 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for 65374f643fb794f30eb30365b4de9b9e: 2024-12-13T01:00:30,695 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92. 
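[Illustrative sketch, not part of the captured log or the test source: the client-side Admin calls that would produce the DisableTableProcedure (pid=209) and the subsequent DeleteTableProcedure (pid=215) traced around this point. Connection settings are assumed; only the table name is taken from the log.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableAndDeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(tn); // master schedules DisableTableProcedure; regions are unassigned and closed
      admin.deleteTable(tn);  // master schedules DeleteTableProcedure; region dirs are archived and ACL/meta entries removed
    }
  }
}
```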
2024-12-13T01:00:30,695 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 783cef47d7d0c4fe8c3db10c8a3aab92: 2024-12-13T01:00:30,696 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed 65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:30,696 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=65374f643fb794f30eb30365b4de9b9e, regionState=CLOSED 2024-12-13T01:00:30,696 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:30,697 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=783cef47d7d0c4fe8c3db10c8a3aab92, regionState=CLOSED 2024-12-13T01:00:30,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=211 2024-12-13T01:00:30,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=211, state=SUCCESS; CloseRegionProcedure 65374f643fb794f30eb30365b4de9b9e, server=9812beb1e4cd,46075,1734051384875 in 159 msec 2024-12-13T01:00:30,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=212 2024-12-13T01:00:30,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=212, state=SUCCESS; CloseRegionProcedure 783cef47d7d0c4fe8c3db10c8a3aab92, server=9812beb1e4cd,45231,1734051384968 in 160 msec 2024-12-13T01:00:30,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=65374f643fb794f30eb30365b4de9b9e, UNASSIGN in 163 msec 2024-12-13T01:00:30,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=212, resume processing ppid=210 2024-12-13T01:00:30,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=783cef47d7d0c4fe8c3db10c8a3aab92, UNASSIGN in 163 msec 2024-12-13T01:00:30,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-13T01:00:30,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 166 msec 2024-12-13T01:00:30,703 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734051630703"}]},"ts":"1734051630703"} 2024-12-13T01:00:30,704 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-13T01:00:30,705 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-13T01:00:30,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 177 msec 2024-12-13T01:00:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-13T01:00:30,832 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-13T01:00:30,833 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,834 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,835 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,837 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40687 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,838 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:30,838 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:30,839 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/recovered.edits] 2024-12-13T01:00:30,839 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf, FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/recovered.edits] 2024-12-13T01:00:30,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T01:00:30,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T01:00:30,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T01:00:30,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:30,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,842 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-13T01:00:30,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-13T01:00:30,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:30,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:30,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:30,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-13T01:00:30,844 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/cf/6ae24dac28fa4ed18e9ab828f1d2028d 2024-12-13T01:00:30,844 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/cf/97473eeb419f4d1bace38086d08cdb2e 2024-12-13T01:00:30,847 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/recovered.edits/9.seqid to hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e/recovered.edits/9.seqid 2024-12-13T01:00:30,847 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/recovered.edits/9.seqid to 
hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92/recovered.edits/9.seqid 2024-12-13T01:00:30,847 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/65374f643fb794f30eb30365b4de9b9e 2024-12-13T01:00:30,847 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testtb-testExportFileSystemStateWithSkipTmp/783cef47d7d0c4fe8c3db10c8a3aab92 2024-12-13T01:00:30,847 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-13T01:00:30,849 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,851 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-13T01:00:30,853 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-13T01:00:30,853 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,854 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-13T01:00:30,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051630854"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:30,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734051630854"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:30,855 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-13T01:00:30,855 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 65374f643fb794f30eb30365b4de9b9e, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734051611645.65374f643fb794f30eb30365b4de9b9e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 783cef47d7d0c4fe8c3db10c8a3aab92, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734051611645.783cef47d7d0c4fe8c3db10c8a3aab92.', STARTKEY => '1', ENDKEY => ''}] 2024-12-13T01:00:30,855 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-13T01:00:30,855 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734051630855"}]},"ts":"9223372036854775807"} 2024-12-13T01:00:30,857 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-13T01:00:30,858 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 25 msec 2024-12-13T01:00:30,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-13T01:00:30,944 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-13T01:00:30,950 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-13T01:00:30,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,952 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-13T01:00:30,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:30,977 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=814 (was 801) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_23737356_1 at /127.0.0.1:52646 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7481 
java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:42910 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-52 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:37634 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 15433) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_23737356_1 at /127.0.0.1:37614 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1349531497_22 at /127.0.0.1:52672 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1941338361) connection to localhost/127.0.0.1:34209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x71b2f156-shared-pool-53 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=779 (was 776) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=587 (was 623), ProcessCount=17 (was 20), AvailableMemoryMB=3061 (was 3199) 2024-12-13T01:00:30,978 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-13T01:00:30,978 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 2024-12-13T01:00:30,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@592fdfd3{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T01:00:30,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@779c3c65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:00:30,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:00:30,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34c8e5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T01:00:30,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@370d6475{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:00:33,486 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000002/launch_container.sh] 2024-12-13T01:00:33,486 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000002/container_tokens] 2024-12-13T01:00:33,486 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-1_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000002/sysfs] 2024-12-13T01:00:34,530 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-13T01:00:34,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734051393214_0010_000001 (auth:SIMPLE) from 127.0.0.1:50144 2024-12-13T01:00:34,633 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000001/launch_container.sh] 2024-12-13T01:00:34,633 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000001/container_tokens] 2024-12-13T01:00:34,633 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/MiniMRCluster_1870862804/yarn-2296387876/MiniMRCluster_1870862804-localDir-nm-0_2/usercache/jenkins/appcache/application_1734051393214_0010/container_1734051393214_0010_01_000001/sysfs] 2024-12-13T01:00:36,216 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T01:00:48,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6920e688{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-13T01:00:48,004 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@188b6f48{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:00:48,004 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:00:48,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b778ac2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T01:00:48,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@145f37c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:00:52,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T01:01:05,012 ERROR [Thread[Thread-407,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T01:01:05,012 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44c86ad0{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-13T01:01:05,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c37b269{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:01:05,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:01:05,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca29929{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T01:01:05,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@275baefc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:01:05,018 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-13T01:01:05,027 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-13T01:01:05,028 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-13T01:01:05,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741830_1006 (size=947281) 2024-12-13T01:01:05,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741830_1006 (size=947281) 2024-12-13T01:01:05,033 ERROR [Thread[Thread-430,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T01:01:05,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f3a232c{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-13T01:01:05,037 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57670a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:01:05,037 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:01:05,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2bc4bd65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-13T01:01:05,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65067abe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:01:05,039 ERROR [Thread[Thread-389,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-13T01:01:05,039 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-13T01:01:05,039 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-13T01:01:05,040 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5634e099 to 127.0.0.1:49424 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1452181362, stopped=false 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,040 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T01:01:05,041 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=9812beb1e4cd,38293,1734051383822 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T01:01:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T01:01:05,042 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T01:01:05,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9812beb1e4cd,46075,1734051384875' ***** 2024-12-13T01:01:05,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T01:01:05,043 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9812beb1e4cd,45231,1734051384968' ***** 2024-12-13T01:01:05,043 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9812beb1e4cd,40687,1734051385054' ***** 2024-12-13T01:01:05,043 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,043 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T01:01:05,044 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T01:01:05,044 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T01:01:05,044 INFO [RS:2;9812beb1e4cd:40687 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T01:01:05,044 INFO [RS:1;9812beb1e4cd:45231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T01:01:05,044 INFO [RS:1;9812beb1e4cd:45231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T01:01:05,044 INFO [RS:2;9812beb1e4cd:40687 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-13T01:01:05,044 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3579): Received CLOSE for f365b65dae95a0516608c8eeb7b680b7 2024-12-13T01:01:05,044 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(3579): Received CLOSE for c77b7057609be1e5abde48e096a7d0b7 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T01:01:05,045 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T01:01:05,045 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3579): Received CLOSE for 1fe5c0fc12b23be0acd40e1738aa9284 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(3579): Received CLOSE for 44a9236bb87948b810c0985660aebd89 2024-12-13T01:01:05,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1224): stopping server 9812beb1e4cd,40687,1734051385054 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1224): stopping server 9812beb1e4cd,46075,1734051384875 2024-12-13T01:01:05,045 DEBUG [RS:2;9812beb1e4cd:40687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,045 DEBUG [RS:0;9812beb1e4cd:46075 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T01:01:05,045 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T01:01:05,045 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T01:01:05,045 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1603): Online Regions={44a9236bb87948b810c0985660aebd89=testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89.} 2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-13T01:01:05,045 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f365b65dae95a0516608c8eeb7b680b7, disabling compactions & flushes 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 44a9236bb87948b810c0985660aebd89, disabling compactions & flushes 2024-12-13T01:01:05,045 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T01:01:05,045 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. after waiting 0 ms 2024-12-13T01:01:05,045 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. after waiting 0 ms 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 
2024-12-13T01:01:05,046 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing f365b65dae95a0516608c8eeb7b680b7 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-13T01:01:05,046 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1224): stopping server 9812beb1e4cd,45231,1734051384968 2024-12-13T01:01:05,046 DEBUG [RS:1;9812beb1e4cd:45231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T01:01:05,046 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-13T01:01:05,046 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1603): Online Regions={c77b7057609be1e5abde48e096a7d0b7=hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7.} 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c77b7057609be1e5abde48e096a7d0b7, disabling compactions & flushes 2024-12-13T01:01:05,046 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. after waiting 0 ms 2024-12-13T01:01:05,046 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 
2024-12-13T01:01:05,046 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing c77b7057609be1e5abde48e096a7d0b7 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-13T01:01:05,047 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-13T01:01:05,047 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1603): Online Regions={f365b65dae95a0516608c8eeb7b680b7=hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7., 1588230740=hbase:meta,,1.1588230740, 1fe5c0fc12b23be0acd40e1738aa9284=testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284.} 2024-12-13T01:01:05,047 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T01:01:05,048 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T01:01:05,048 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T01:01:05,048 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T01:01:05,048 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T01:01:05,048 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-13T01:01:05,049 DEBUG [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1629): Waiting on c77b7057609be1e5abde48e096a7d0b7 2024-12-13T01:01:05,049 DEBUG [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1629): Waiting on 44a9236bb87948b810c0985660aebd89 2024-12-13T01:01:05,050 DEBUG [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 1fe5c0fc12b23be0acd40e1738aa9284, f365b65dae95a0516608c8eeb7b680b7 2024-12-13T01:01:05,051 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/44a9236bb87948b810c0985660aebd89/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T01:01:05,052 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,052 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 2024-12-13T01:01:05,052 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 44a9236bb87948b810c0985660aebd89: 2024-12-13T01:01:05,052 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89. 
2024-12-13T01:01:05,069 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/.tmp/l/06647c0c102840f282e34ab6fd7c3151 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734051529470/DeleteFamily/seqid=0 2024-12-13T01:01:05,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742379_1555 (size=5695) 2024-12-13T01:01:05,074 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/.tmp/info/183150b459154f8f8e0e875d09411251 is 45, key is default/info:d/1734051388771/Put/seqid=0 2024-12-13T01:01:05,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742379_1555 (size=5695) 2024-12-13T01:01:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742379_1555 (size=5695) 2024-12-13T01:01:05,075 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/.tmp/l/06647c0c102840f282e34ab6fd7c3151 2024-12-13T01:01:05,079 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/info/fafb7ae4ea1b4aa3acf331223c87136b is 173, key is testExportExpiredSnapshot,1,1734051532214.44a9236bb87948b810c0985660aebd89./info:regioninfo/1734051532680/Put/seqid=0 2024-12-13T01:01:05,086 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 06647c0c102840f282e34ab6fd7c3151 2024-12-13T01:01:05,087 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/.tmp/l/06647c0c102840f282e34ab6fd7c3151 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/l/06647c0c102840f282e34ab6fd7c3151 2024-12-13T01:01:05,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742380_1556 (size=5037) 2024-12-13T01:01:05,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742380_1556 (size=5037) 2024-12-13T01:01:05,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742380_1556 (size=5037) 2024-12-13T01:01:05,089 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/.tmp/info/183150b459154f8f8e0e875d09411251 2024-12-13T01:01:05,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742381_1557 (size=15630) 2024-12-13T01:01:05,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742381_1557 (size=15630) 2024-12-13T01:01:05,091 INFO [regionserver/9812beb1e4cd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742381_1557 (size=15630) 2024-12-13T01:01:05,092 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/info/fafb7ae4ea1b4aa3acf331223c87136b 2024-12-13T01:01:05,093 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 06647c0c102840f282e34ab6fd7c3151 2024-12-13T01:01:05,093 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/l/06647c0c102840f282e34ab6fd7c3151, entries=12, sequenceid=27, filesize=5.6 K 2024-12-13T01:01:05,094 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for f365b65dae95a0516608c8eeb7b680b7 in 48ms, sequenceid=27, compaction requested=false 2024-12-13T01:01:05,094 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T01:01:05,094 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T01:01:05,094 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/.tmp/info/183150b459154f8f8e0e875d09411251 as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/info/183150b459154f8f8e0e875d09411251 2024-12-13T01:01:05,098 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/acl/f365b65dae95a0516608c8eeb7b680b7/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-13T01:01:05,098 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,098 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T01:01:05,098 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f365b65dae95a0516608c8eeb7b680b7: 2024-12-13T01:01:05,098 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734051388980.f365b65dae95a0516608c8eeb7b680b7. 2024-12-13T01:01:05,099 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1fe5c0fc12b23be0acd40e1738aa9284, disabling compactions & flushes 2024-12-13T01:01:05,099 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T01:01:05,099 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T01:01:05,099 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. after waiting 0 ms 2024-12-13T01:01:05,099 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T01:01:05,099 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/info/183150b459154f8f8e0e875d09411251, entries=2, sequenceid=6, filesize=4.9 K 2024-12-13T01:01:05,100 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for c77b7057609be1e5abde48e096a7d0b7 in 54ms, sequenceid=6, compaction requested=false 2024-12-13T01:01:05,102 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/default/testExportExpiredSnapshot/1fe5c0fc12b23be0acd40e1738aa9284/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-13T01:01:05,102 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,102 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 
2024-12-13T01:01:05,102 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1fe5c0fc12b23be0acd40e1738aa9284: 2024-12-13T01:01:05,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734051532214.1fe5c0fc12b23be0acd40e1738aa9284. 2024-12-13T01:01:05,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/namespace/c77b7057609be1e5abde48e096a7d0b7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T01:01:05,103 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,104 INFO [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T01:01:05,104 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c77b7057609be1e5abde48e096a7d0b7: 2024-12-13T01:01:05,104 DEBUG [RS_CLOSE_REGION-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734051388084.c77b7057609be1e5abde48e096a7d0b7. 2024-12-13T01:01:05,106 INFO [regionserver/9812beb1e4cd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,113 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/rep_barrier/93cbe0226a564a69bf4c22319459f55e is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8./rep_barrier:/1734051529493/DeleteFamily/seqid=0 2024-12-13T01:01:05,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742382_1558 (size=8007) 2024-12-13T01:01:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742382_1558 (size=8007) 2024-12-13T01:01:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742382_1558 (size=8007) 2024-12-13T01:01:05,118 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/rep_barrier/93cbe0226a564a69bf4c22319459f55e 2024-12-13T01:01:05,120 INFO [regionserver/9812beb1e4cd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,131 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T01:01:05,131 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T01:01:05,137 DEBUG 
[RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/table/a6eca33ae5344e9aaba86bdec5ef35ed is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734051508881.b0972bb6f077a6b1eb2abc41db866af8./table:/1734051529493/DeleteFamily/seqid=0 2024-12-13T01:01:05,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073742383_1559 (size=8861) 2024-12-13T01:01:05,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073742383_1559 (size=8861) 2024-12-13T01:01:05,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073742383_1559 (size=8861) 2024-12-13T01:01:05,143 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/table/a6eca33ae5344e9aaba86bdec5ef35ed 2024-12-13T01:01:05,147 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/info/fafb7ae4ea1b4aa3acf331223c87136b as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/info/fafb7ae4ea1b4aa3acf331223c87136b 2024-12-13T01:01:05,151 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/info/fafb7ae4ea1b4aa3acf331223c87136b, entries=84, sequenceid=202, filesize=15.3 K 2024-12-13T01:01:05,151 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/rep_barrier/93cbe0226a564a69bf4c22319459f55e as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/rep_barrier/93cbe0226a564a69bf4c22319459f55e 2024-12-13T01:01:05,155 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/rep_barrier/93cbe0226a564a69bf4c22319459f55e, entries=21, sequenceid=202, filesize=7.8 K 2024-12-13T01:01:05,156 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/.tmp/table/a6eca33ae5344e9aaba86bdec5ef35ed as hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/table/a6eca33ae5344e9aaba86bdec5ef35ed 2024-12-13T01:01:05,159 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/table/a6eca33ae5344e9aaba86bdec5ef35ed, entries=38, sequenceid=202, filesize=8.7 K 2024-12-13T01:01:05,160 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 112ms, sequenceid=202, compaction requested=false 2024-12-13T01:01:05,164 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-13T01:01:05,164 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:05,164 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T01:01:05,165 INFO [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T01:01:05,165 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T01:01:05,165 DEBUG [RS_CLOSE_META-regionserver/9812beb1e4cd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-13T01:01:05,208 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T01:01:05,208 INFO [regionserver/9812beb1e4cd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T01:01:05,249 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1250): stopping server 9812beb1e4cd,46075,1734051384875; all regions closed. 2024-12-13T01:01:05,249 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1250): stopping server 9812beb1e4cd,45231,1734051384968; all regions closed. 2024-12-13T01:01:05,250 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1250): stopping server 9812beb1e4cd,40687,1734051385054; all regions closed. 
2024-12-13T01:01:05,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741833_1009 (size=10621) 2024-12-13T01:01:05,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741833_1009 (size=10621) 2024-12-13T01:01:05,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741834_1010 (size=14516) 2024-12-13T01:01:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741836_1012 (size=80694) 2024-12-13T01:01:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741834_1010 (size=14516) 2024-12-13T01:01:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741836_1012 (size=80694) 2024-12-13T01:01:05,256 DEBUG [RS:0;9812beb1e4cd:46075 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs 2024-12-13T01:01:05,256 INFO [RS:0;9812beb1e4cd:46075 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9812beb1e4cd%2C46075%2C1734051384875:(num 1734051387298) 2024-12-13T01:01:05,256 DEBUG [RS:0;9812beb1e4cd:46075 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,256 DEBUG [RS:1;9812beb1e4cd:45231 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs 2024-12-13T01:01:05,256 INFO [RS:1;9812beb1e4cd:45231 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9812beb1e4cd%2C45231%2C1734051384968:(num 1734051387299) 2024-12-13T01:01:05,256 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,256 DEBUG [RS:1;9812beb1e4cd:45231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,256 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,256 DEBUG [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs 2024-12-13T01:01:05,256 INFO [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9812beb1e4cd%2C40687%2C1734051385054.meta:.meta(num 1734051387804) 2024-12-13T01:01:05,257 INFO [RS:1;9812beb1e4cd:45231 {}] hbase.ChoreService(370): Chore service for: regionserver/9812beb1e4cd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-13T01:01:05,257 INFO [RS:0;9812beb1e4cd:46075 {}] hbase.ChoreService(370): Chore service for: regionserver/9812beb1e4cd:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T01:01:05,257 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T01:01:05,257 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T01:01:05,257 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-13T01:01:05,257 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T01:01:05,257 INFO [regionserver/9812beb1e4cd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T01:01:05,257 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T01:01:05,257 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T01:01:05,258 INFO [RS:0;9812beb1e4cd:46075 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46075 2024-12-13T01:01:05,258 INFO [regionserver/9812beb1e4cd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T01:01:05,260 INFO [RS:1;9812beb1e4cd:45231 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45231 2024-12-13T01:01:05,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42021 is added to blk_1073741835_1011 (size=13137) 2024-12-13T01:01:05,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39849 is added to blk_1073741835_1011 (size=13137) 2024-12-13T01:01:05,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9812beb1e4cd,45231,1734051384968 2024-12-13T01:01:05,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T01:01:05,263 DEBUG [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/oldWALs 2024-12-13T01:01:05,263 INFO [RS:2;9812beb1e4cd:40687 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 9812beb1e4cd%2C40687%2C1734051385054:(num 1734051387299) 2024-12-13T01:01:05,263 DEBUG [RS:2;9812beb1e4cd:40687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,263 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T01:01:05,263 INFO [RS:2;9812beb1e4cd:40687 {}] hbase.ChoreService(370): Chore service for: regionserver/9812beb1e4cd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-13T01:01:05,263 INFO [regionserver/9812beb1e4cd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-13T01:01:05,264 INFO [RS:2;9812beb1e4cd:40687 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40687 2024-12-13T01:01:05,265 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9812beb1e4cd,46075,1734051384875] 2024-12-13T01:01:05,266 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9812beb1e4cd,46075,1734051384875; numProcessing=1 2024-12-13T01:01:05,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T01:01:05,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9812beb1e4cd,40687,1734051385054 2024-12-13T01:01:05,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9812beb1e4cd,46075,1734051384875 2024-12-13T01:01:05,267 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9812beb1e4cd,46075,1734051384875 already deleted, retry=false 2024-12-13T01:01:05,267 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9812beb1e4cd,46075,1734051384875 expired; onlineServers=2 2024-12-13T01:01:05,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9812beb1e4cd,45231,1734051384968] 2024-12-13T01:01:05,267 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9812beb1e4cd,45231,1734051384968; numProcessing=2 2024-12-13T01:01:05,268 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9812beb1e4cd,45231,1734051384968 already deleted, retry=false 2024-12-13T01:01:05,268 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9812beb1e4cd,45231,1734051384968 expired; onlineServers=1 2024-12-13T01:01:05,269 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9812beb1e4cd,40687,1734051385054] 2024-12-13T01:01:05,269 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 9812beb1e4cd,40687,1734051385054; numProcessing=3 2024-12-13T01:01:05,270 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/9812beb1e4cd,40687,1734051385054 already deleted, retry=false 2024-12-13T01:01:05,270 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 9812beb1e4cd,40687,1734051385054 expired; onlineServers=0 2024-12-13T01:01:05,270 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '9812beb1e4cd,38293,1734051383822' ***** 2024-12-13T01:01:05,270 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T01:01:05,270 DEBUG [M:0;9812beb1e4cd:38293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c872910, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=9812beb1e4cd/172.17.0.2:0 2024-12-13T01:01:05,270 INFO [M:0;9812beb1e4cd:38293 {}] regionserver.HRegionServer(1224): stopping server 9812beb1e4cd,38293,1734051383822 2024-12-13T01:01:05,270 INFO [M:0;9812beb1e4cd:38293 {}] regionserver.HRegionServer(1250): stopping server 9812beb1e4cd,38293,1734051383822; all regions closed. 2024-12-13T01:01:05,270 DEBUG [M:0;9812beb1e4cd:38293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T01:01:05,270 DEBUG [M:0;9812beb1e4cd:38293 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T01:01:05,271 DEBUG [M:0;9812beb1e4cd:38293 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T01:01:05,271 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-13T01:01:05,271 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.small.0-1734051386719 {}] cleaner.HFileCleaner(306): Exit Thread[master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.small.0-1734051386719,5,FailOnTimeoutGroup] 2024-12-13T01:01:05,271 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.large.0-1734051386703 {}] cleaner.HFileCleaner(306): Exit Thread[master/9812beb1e4cd:0:becomeActiveMaster-HFileCleaner.large.0-1734051386703,5,FailOnTimeoutGroup] 2024-12-13T01:01:05,271 INFO [M:0;9812beb1e4cd:38293 {}] hbase.ChoreService(370): Chore service for: master/9812beb1e4cd:0 had [] on shutdown 2024-12-13T01:01:05,271 DEBUG [M:0;9812beb1e4cd:38293 {}] master.HMaster(1733): Stopping service threads 2024-12-13T01:01:05,271 INFO [M:0;9812beb1e4cd:38293 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T01:01:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-13T01:01:05,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T01:01:05,272 INFO [M:0;9812beb1e4cd:38293 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T01:01:05,272 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-13T01:01:05,272 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T01:01:05,272 DEBUG [M:0;9812beb1e4cd:38293 {}] zookeeper.ZKUtil(347): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-13T01:01:05,272 WARN [M:0;9812beb1e4cd:38293 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-13T01:01:05,272 INFO [M:0;9812beb1e4cd:38293 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-13T01:01:05,272 INFO [M:0;9812beb1e4cd:38293 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-13T01:01:05,273 DEBUG [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T01:01:05,286 INFO [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T01:01:05,286 DEBUG [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T01:01:05,286 DEBUG [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T01:01:05,286 DEBUG [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T01:01:05,287 INFO [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.82 KB heapSize=967.13 KB 2024-12-13T01:01:05,287 ERROR [AsyncFSWAL-0-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T01:01:05,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,365 INFO [RS:1;9812beb1e4cd:45231 {}] regionserver.HRegionServer(1307): Exiting; stopping=9812beb1e4cd,45231,1734051384968; zookeeper connection closed. 2024-12-13T01:01:05,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45231-0x10088dfff310002, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,365 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5b4f2ab4 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5b4f2ab4 2024-12-13T01:01:05,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40687-0x10088dfff310003, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46075-0x10088dfff310001, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:01:05,368 INFO [RS:2;9812beb1e4cd:40687 {}] regionserver.HRegionServer(1307): Exiting; stopping=9812beb1e4cd,40687,1734051385054; zookeeper connection closed. 2024-12-13T01:01:05,368 INFO [RS:0;9812beb1e4cd:46075 {}] regionserver.HRegionServer(1307): Exiting; stopping=9812beb1e4cd,46075,1734051384875; zookeeper connection closed. 
2024-12-13T01:01:05,368 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67cecfac {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67cecfac 2024-12-13T01:01:05,368 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c6b1677 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c6b1677 2024-12-13T01:01:05,368 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-13T01:01:07,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741834_1010 (size=14516) 2024-12-13T01:01:07,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741833_1009 (size=10621) 2024-12-13T01:01:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741836_1012 (size=80694) 2024-12-13T01:01:07,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741830_1006 (size=947281) 2024-12-13T01:01:10,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40551 is added to blk_1073741835_1011 (size=13137) 2024-12-13T01:01:10,568 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T01:01:14,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-13T01:01:20,032 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T01:01:22,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T01:01:25,184 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-13T01:01:25,185 DEBUG [master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-13T01:01:33,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-13T01:01:52,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9812beb1e4cd:38293 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 
Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@a5b06c1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b79ac3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.CountDownLatch$Sync@1d57c2e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12186 Waited count: 12698 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@23563e02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@25484e2e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@119017fd): State: TIMED_WAITING Blocked count: 0 Waited count: 691 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1780917956-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1780917956-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1780917956-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1780917956-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1780917956-41-acceptor-0@36941b88-ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1780917956-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1780917956-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1780917956-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-ada0b1c-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 30 Waited count: 3072 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3667e02c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46775): State: TIMED_WAITING Blocked count: 1 Waited count: 36 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@b13635f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@70667cc7): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 1 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 34026 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a475f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46775): State: TIMED_WAITING Blocked count: 60 Waited count: 2101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46775): State: TIMED_WAITING Blocked count: 72 Waited count: 2112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46775): State: TIMED_WAITING Blocked count: 53 Waited count: 2100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46775): State: TIMED_WAITING Blocked count: 51 Waited count: 2094 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46775): State: TIMED_WAITING Blocked count: 52 Waited count: 2099 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@b87406f): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@30bbf475): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@49f2f986): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@76d0b6c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1274269859)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp686157005-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp686157005-88-acceptor-0@1b5ce5a1-ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp686157005-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 
(qtp686157005-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3e0cc4b9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@54f57554): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for 
port 38845): State: TIMED_WAITING Blocked count: 1 Waited count: 36 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 278 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@798ed1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1285 Waited count: 1358 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1bed2cc4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1838786507-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1838786507-120-acceptor-0@63670ecd-ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1838786507-121): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1838786507-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-12aecced-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1941338361) connection to localhost/127.0.0.1:46775 from jenkins): State: TIMED_WAITING Blocked count: 1317 Waited count: 1318 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 0 Waited count: 1888 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@32493c28): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36287): State: TIMED_WAITING Blocked count: 1 Waited count: 36 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 273 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48de15bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1272 Waited count: 1363 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d22c3e3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp412220441-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp412220441-157-acceptor-0@52c6fc2f-ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp412220441-159): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp412220441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7f6e076-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3)): State: TIMED_WAITING Blocked count: 15 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 184 (ForkJoinPool-2-worker-3): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.ForkJoinPool@cee8c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@513a50c7): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 41429): State: 
TIMED_WAITING Blocked count: 1 Waited count: 36 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@7cf6afb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@9201e9f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 277 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b806033 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1237 Waited count: 1362 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 
1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4c3b7bbf): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 193 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@4a9bc2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49424): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 738 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53924d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:49424):): State: WAITING Blocked count: 1 Waited count: 841 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b89d53c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 865 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60775b1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6353d8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) 
app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 280 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:49424)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cb3cfff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 37 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@626514b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f8c5ccb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 234 Waited count: 926 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fa65479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 72 
Waited count: 234 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d78b045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293): State: WAITING Blocked count: 76 Waited count: 6449 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f2c9f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@172d3170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fff5148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@e73e246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@79ff63ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9812beb1e4cd:38293): State: TIMED_WAITING Blocked count: 7 Waited count: 2803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) 
app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$953/0x00007f495cf1e0c8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@7427d00f): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3393 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 398 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 54 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33860 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17d471e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@797c797a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176fb430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c691053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 498 (LeaseRenewer:jenkins.hfs.0@localhost:46775): State: TIMED_WAITING Blocked count: 9 Waited count: 353 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 501 (LeaseRenewer:jenkins.hfs.1@localhost:46775): State: TIMED_WAITING Blocked count: 10 Waited count: 353 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (LeaseRenewer:jenkins.hfs.2@localhost:46775): State: TIMED_WAITING Blocked count: 9 Waited count: 353 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33612 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 574 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 575 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 180 Waiting on java.util.concurrent.ForkJoinPool@622b3a87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 584 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 824 Waiting on java.util.concurrent.ForkJoinPool@622b3a87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 597 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 
(region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 414 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c60be1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1168 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1522 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@4575f85f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1998 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2008 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4696 (java.util.concurrent.ThreadPoolExecutor$Worker@193c3054[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7eb11d74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4940 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4941 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4942 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (AsyncFSWAL-1-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82e9798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8925 (java.util.concurrent.ThreadPoolExecutor$Worker@1d705bd1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8926 (java.util.concurrent.ThreadPoolExecutor$Worker@ab123b1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8929 (java.util.concurrent.ThreadPoolExecutor$Worker@622cf8ae[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8930 (java.util.concurrent.ThreadPoolExecutor$Worker@7502434d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8936 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T01:02:22,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T01:02:52,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9812beb1e4cd:38293 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@a5b06c1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b79ac3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4087 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.CountDownLatch$Sync@61a8c375 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12186 Waited count: 12699 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@23563e02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@25484e2e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@119017fd): State: TIMED_WAITING Blocked count: 0 Waited count: 811 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 82 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1780917956-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1780917956-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1780917956-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1780917956-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1780917956-41-acceptor-0@36941b88-ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1780917956-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1780917956-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1780917956-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-ada0b1c-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 30 Waited count: 3072 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3667e02c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46775): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@b13635f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 135 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@70667cc7): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 1 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 39989 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a475f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46775): State: TIMED_WAITING Blocked count: 60 Waited count: 2162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46775): State: TIMED_WAITING Blocked count: 72 Waited count: 2173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46775): State: TIMED_WAITING Blocked count: 53 Waited count: 2161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46775): State: TIMED_WAITING Blocked count: 51 Waited count: 2155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46775): State: TIMED_WAITING Blocked count: 52 Waited count: 2160 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@b87406f): State: TIMED_WAITING Blocked count: 0 Waited count: 203 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@30bbf475): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@49f2f986): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@76d0b6c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1274269859)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp686157005-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp686157005-88-acceptor-0@1b5ce5a1-ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp686157005-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp686157005-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3e0cc4b9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@54f57554): State: TIMED_WAITING Blocked count: 0 Waited count: 808 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38845): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 298 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@798ed1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1305 Waited count: 1398 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1bed2cc4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 413 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1838786507-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1838786507-120-acceptor-0@63670ecd-ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1838786507-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1838786507-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-12aecced-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1941338361) connection to localhost/127.0.0.1:46775 from jenkins): State: TIMED_WAITING Blocked count: 1376 Waited count: 1377 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 0 Waited count: 1948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@32493c28): State: TIMED_WAITING Blocked count: 0 Waited count: 807 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36287): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 293 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48de15bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1292 Waited count: 1403 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d22c3e3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp412220441-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp412220441-157-acceptor-0@52c6fc2f-ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp412220441-159): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp412220441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7f6e076-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3)): State: TIMED_WAITING Blocked count: 15 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 184 (ForkJoinPool-2-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@513a50c7): State: TIMED_WAITING Blocked count: 0 Waited count: 807 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 41429): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@7cf6afb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@9201e9f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 297 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b806033 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1257 Waited count: 1402 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4c3b7bbf): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 193 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 408 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@4a9bc2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49424): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 202 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 743 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53924d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:49424):): State: WAITING Blocked count: 1 Waited count: 846 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b89d53c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 870 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60775b1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6353d8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:49424)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cb3cfff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 37 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@626514b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 
(NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f8c5ccb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 234 Waited count: 926 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fa65479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 72 Waited count: 234 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d78b045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293): State: WAITING Blocked count: 76 Waited count: 6449 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f2c9f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@172d3170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fff5148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@e73e246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@79ff63ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited 
count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9812beb1e4cd:38293): State: TIMED_WAITING Blocked count: 7 Waited count: 2803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$953/0x00007f495cf1e0c8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@7427d00f): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3992 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 398 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 54 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187ef5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39862 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17d471e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@797c797a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176fb430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c691053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 518 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39614 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 180 Waiting on java.util.concurrent.ForkJoinPool@622b3a87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 584 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 825 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 597 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 420 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c60be1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1168 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1522 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@4575f85f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1998 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2008 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4696 (java.util.concurrent.ThreadPoolExecutor$Worker@193c3054[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7eb11d74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4940 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4941 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4942 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (AsyncFSWAL-1-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82e9798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8936 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T01:03:22,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T01:03:52,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9812beb1e4cd:38293 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@a5b06c1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b79ac3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@5a621e3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12186 Waited count: 12700 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@23563e02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@25484e2e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@119017fd): State: TIMED_WAITING Blocked count: 0 Waited count: 932 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 94 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1780917956-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1780917956-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1780917956-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1780917956-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1780917956-41-acceptor-0@36941b88-ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1780917956-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1780917956-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1780917956-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-ada0b1c-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 30 Waited count: 3072 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3667e02c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46775): State: TIMED_WAITING Blocked count: 1 Waited 
count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@b13635f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@70667cc7): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 1 Waited count: 156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45953 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a475f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46775): State: TIMED_WAITING Blocked count: 65 Waited count: 2223 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46775): State: TIMED_WAITING Blocked count: 75 Waited count: 2235 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46775): State: TIMED_WAITING Blocked count: 54 Waited count: 2223 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46775): State: TIMED_WAITING Blocked count: 56 Waited count: 2216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46775): State: TIMED_WAITING Blocked count: 52 Waited count: 2221 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@b87406f): State: TIMED_WAITING Blocked count: 0 Waited count: 233 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@30bbf475): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@49f2f986): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@76d0b6c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1274269859)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp686157005-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp686157005-88-acceptor-0@1b5ce5a1-ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp686157005-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp686157005-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3e0cc4b9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@54f57554): State: TIMED_WAITING Blocked count: 0 Waited count: 928 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38845): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 318 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@798ed1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1326 Waited count: 1440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1bed2cc4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 496 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1838786507-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1838786507-120-acceptor-0@63670ecd-ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1838786507-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1838786507-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-12aecced-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1941338361) connection to localhost/127.0.0.1:46775 from jenkins): State: TIMED_WAITING Blocked count: 1423 Waited count: 1424 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 0 Waited count: 1997 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@32493c28): State: TIMED_WAITING Blocked count: 0 Waited count: 927 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36287): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48de15bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1312 Waited count: 1452 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d22c3e3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp412220441-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp412220441-157-acceptor-0@52c6fc2f-ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp412220441-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp412220441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7f6e076-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3)): State: TIMED_WAITING Blocked count: 15 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4)): 
State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@513a50c7): State: TIMED_WAITING Blocked count: 0 Waited count: 927 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 41429): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@7cf6afb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@9201e9f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 317 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b806033 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1277 Waited count: 1442 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4c3b7bbf): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 193 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@4a9bc2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49424): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 747 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53924d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:49424):): State: WAITING Blocked count: 1 Waited count: 850 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b89d53c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 874 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60775b1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6353d8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 356 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:49424)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 58 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cb3cfff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 37 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@626514b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f8c5ccb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 234 Waited count: 926 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fa65479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 72 Waited count: 234 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d78b045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293): State: WAITING Blocked count: 76 Waited count: 6449 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f2c9f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@172d3170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fff5148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@e73e246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@79ff63ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9812beb1e4cd:38293): State: TIMED_WAITING Blocked count: 7 Waited count: 2803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$953/0x00007f495cf1e0c8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@7427d00f): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4592 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 398 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 54 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187ef5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45864 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17d471e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@797c797a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176fb430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c691053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 518 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45616 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 575 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 597 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 426 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c60be1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1168 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1522 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@4575f85f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1998 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2008 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4696 (java.util.concurrent.ThreadPoolExecutor$Worker@193c3054[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7eb11d74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4940 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4941 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4942 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (AsyncFSWAL-1-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82e9798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8936 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T01:04:22,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-13T01:04:52,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9812beb1e4cd:38293 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@a5b06c1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b79ac3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@66494d9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12186 Waited count: 12701 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@23563e02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@25484e2e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@119017fd): State: TIMED_WAITING Blocked count: 0 Waited count: 1052 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1780917956-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1780917956-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1780917956-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1780917956-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1780917956-41-acceptor-0@36941b88-ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1780917956-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1780917956-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1780917956-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-ada0b1c-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 30 Waited count: 3072 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3667e02c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46775): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@b13635f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 175 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@70667cc7): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 1 Waited count: 176 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51914 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a475f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46775): State: TIMED_WAITING Blocked count: 65 Waited count: 2284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46775): State: TIMED_WAITING Blocked count: 76 Waited count: 2296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46775): State: TIMED_WAITING Blocked count: 54 Waited count: 2284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46775): State: TIMED_WAITING Blocked count: 56 Waited count: 2277 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46775): State: TIMED_WAITING Blocked count: 52 Waited count: 2282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@b87406f): State: TIMED_WAITING Blocked count: 0 Waited count: 263 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@30bbf475): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@49f2f986): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@76d0b6c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1274269859)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp686157005-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp686157005-88-acceptor-0@1b5ce5a1-ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp686157005-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp686157005-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3e0cc4b9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@54f57554): State: TIMED_WAITING Blocked count: 0 Waited count: 1048 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38845): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@798ed1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1346 Waited count: 1480 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1bed2cc4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 538 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1838786507-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1838786507-120-acceptor-0@63670ecd-ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1838786507-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1838786507-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-12aecced-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1941338361) connection to localhost/127.0.0.1:46775 from jenkins): State: TIMED_WAITING Blocked count: 1481 Waited count: 1482 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 0 Waited count: 2057 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@32493c28): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36287): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48de15bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1332 Waited count: 1492 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d22c3e3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp412220441-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp412220441-157-acceptor-0@52c6fc2f-ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp412220441-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp412220441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7f6e076-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3)): State: TIMED_WAITING Blocked count: 15 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@513a50c7): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 41429): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 
(Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@7cf6afb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@9201e9f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b806033 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1297 Waited count: 1482 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4c3b7bbf): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 193 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@4a9bc2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49424): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 751 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53924d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:49424):): State: WAITING Blocked count: 1 Waited count: 854 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b89d53c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 878 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60775b1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6353d8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:49424)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cb3cfff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 37 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@626514b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f8c5ccb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 234 Waited count: 926 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fa65479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 72 Waited count: 234 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d78b045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293): State: WAITING Blocked count: 76 Waited count: 6449 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f2c9f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@172d3170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fff5148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@e73e246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@79ff63ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: 
RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9812beb1e4cd:38293): State: TIMED_WAITING Blocked count: 7 Waited count: 2803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$953/0x00007f495cf1e0c8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@7427d00f): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5192 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 398 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 54 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187ef5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51865 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17d471e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@797c797a 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176fb430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c691053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 518 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51617 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c60be1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1168 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1522 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@4575f85f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1998 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2008 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4696 (java.util.concurrent.ThreadPoolExecutor$Worker@193c3054[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7eb11d74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4940 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4941 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4942 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (AsyncFSWAL-1-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82e9798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8936 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-13T01:05:22,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T01:05:52,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
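The two DEBUG entries above come from HBaseTestingUtility's FsDatasetAsyncDiskServiceFixer, which evidently tries to look up a field named "threadGroup" by reflection and, when newer Hadoop releases no longer declare it (see HBASE-27595), just records the NoSuchFieldException at DEBUG and moves on. A minimal, hypothetical sketch of that reflective lookup-with-fallback pattern follows; everything except the "threadGroup" field name is illustrative and is not the actual HBase or Hadoop code.

    // Illustrative only: reflective field access with a graceful fallback,
    // the pattern suggested by the DEBUG lines above.
    import java.lang.reflect.Field;

    final class ThreadGroupPeeker {
      /**
       * Tries to read a private field named "threadGroup" from the given object.
       * Returns null when the field is absent (e.g. the library version no longer
       * declares it), letting the caller log the condition and skip the fix-up.
       */
      static ThreadGroup tryReadThreadGroup(Object target) {
        try {
          Field f = target.getClass().getDeclaredField("threadGroup");
          f.setAccessible(true);
          return (ThreadGroup) f.get(target);
        } catch (NoSuchFieldException e) {
          // Field removed or renamed in this version; caller should skip quietly.
          return null;
        } catch (IllegalAccessException e) {
          return null;
        }
      }
    }

In this log the lookup failure is evidently benign: it is reported only at DEBUG and recurs periodically (the two entries above are 30 seconds apart) rather than failing the test.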
2024-12-13T01:06:05,288 DEBUG [M:0;9812beb1e4cd:38293 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T01:06:05,288 WARN [M:0;9812beb1e4cd:38293 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3721, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3721, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
    ... 20 more
2024-12-13T01:06:05,289 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T01:06:05,291 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-13T01:06:05,291 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-13T01:06:05,291 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 2024-12-13T01:06:05,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T01:06:05,292 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T01:06:05,292 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 2024-12-13T01:06:05,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9812beb1e4cd:38293 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@a5b06c1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b79ac3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5886 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@4b0aaad4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12186 Waited count: 12702 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@23563e02 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@25484e2e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@119017fd): State: TIMED_WAITING Blocked count: 0 Waited count: 1172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1780917956-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1780917956-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1780917956-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1780917956-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1780917956-41-acceptor-0@36941b88-ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:42245}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1780917956-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1780917956-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1780917956-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-ada0b1c-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 30 Waited count: 3072 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3667e02c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46775): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@b13635f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@70667cc7): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 1 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57877 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a475f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46775): State: TIMED_WAITING Blocked count: 70 Waited count: 2345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46775): State: TIMED_WAITING Blocked count: 76 Waited count: 2357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46775): State: TIMED_WAITING Blocked count: 54 Waited count: 2346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46775): State: TIMED_WAITING Blocked count: 58 Waited count: 2338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46775): State: TIMED_WAITING Blocked count: 54 Waited count: 2343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@b87406f): State: TIMED_WAITING Blocked count: 0 Waited count: 293 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@30bbf475): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@49f2f986): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@76d0b6c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1274269859)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp686157005-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp686157005-88-acceptor-0@1b5ce5a1-ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:46061}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp686157005-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp686157005-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3e0cc4b9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@54f57554): State: TIMED_WAITING Blocked count: 0 Waited count: 1168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38845): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@798ed1c2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1366 Waited count: 1525 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1bed2cc4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 603 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38845): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1838786507-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1838786507-120-acceptor-0@63670ecd-ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:42911}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1838786507-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1838786507-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-12aecced-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1941338361) connection to localhost/127.0.0.1:46775 from jenkins): State: TIMED_WAITING Blocked count: 1531 Waited count: 1532 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 0 Waited count: 2110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@32493c28): State: TIMED_WAITING Blocked count: 0 Waited count: 1167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36287): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48de15bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1352 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d22c3e3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 36287): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp412220441-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f495c428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp412220441-157-acceptor-0@52c6fc2f-ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:39891}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp412220441-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp412220441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7f6e076-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3)): State: TIMED_WAITING Blocked count: 15 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@513a50c7): State: TIMED_WAITING Blocked count: 0 Waited count: 1167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 41429): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 
(Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@7cf6afb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@9201e9f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 1 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b806033 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 209 (BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775): State: TIMED_WAITING Blocked count: 1317 Waited count: 1522 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4c3b7bbf): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 193 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 41429): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@4a9bc2b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49424): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 26 Waited count: 756 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53924d3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:49424):): State: WAITING Blocked count: 1 Waited count: 859 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b89d53c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 883 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60775b1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 136 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6353d8e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:49424)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@cb3cfff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 137 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 37 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@626514b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e99a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f8c5ccb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 234 Waited count: 926 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fa65479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 72 Waited count: 234 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d78b045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38293): State: WAITING Blocked count: 76 Waited count: 6449 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f2c9f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ae5b3c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@172d3170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fff5148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@e73e246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38293): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@79ff63ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 291 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (RS-EventLoopGroup-5-1): State: 
RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;9812beb1e4cd:38293): State: TIMED_WAITING Blocked count: 7 Waited count: 2804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/9812beb1e4cd:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 
Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@7427d00f): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5791 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 398 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 54 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5187ef5a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57867 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 17 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17d471e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@797c797a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@176fb430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/9812beb1e4cd:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c691053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 518 (region-location-0): State: WAITING Blocked count: 10 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RS-EventLoopGroup-4-2): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57619 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-1): State: WAITING Blocked 
count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 438 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c60be1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1168 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1522 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@4575f85f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1998 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2008 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78063b54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4696 (java.util.concurrent.ThreadPoolExecutor$Worker@193c3054[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7eb11d74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4940 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4941 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4942 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (AsyncFSWAL-1-hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData-prefix:9812beb1e4cd,38293,1734051383822): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82e9798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8936 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8937 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8941 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) 
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8942 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1122/0x00007f495d162ba8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-13T01:06:09,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T01:06:10,289 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-13T01:06:10,290 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T01:06:10,290 INFO [M:0;9812beb1e4cd:38293 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-13T01:06:10,290 INFO [M:0;9812beb1e4cd:38293 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38293 2024-12-13T01:06:10,292 DEBUG [M:0;9812beb1e4cd:38293 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/9812beb1e4cd,38293,1734051383822 already deleted, retry=false 2024-12-13T01:06:10,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46775/user/jenkins/test-data/6ea3ece7-10f7-f1d8-2c6d-1c06276fbdee/MasterData/WALs/9812beb1e4cd,38293,1734051383822/9812beb1e4cd%2C38293%2C1734051383822.1734051385705 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-13T01:06:10,393 INFO [M:0;9812beb1e4cd:38293 {}] regionserver.HRegionServer(1307): Exiting; stopping=9812beb1e4cd,38293,1734051383822; zookeeper connection closed. 
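The ERROR from WAL-Shutdown-0 above names the knob that controls how long the shutdown thread waits for the async WAL writer to close: "hbase.wal.async.wait.on.shutdown.seconds" (this run waited the 5 seconds reported in the message). Where a longer wait is genuinely needed, the value can be raised on the configuration used to start the cluster. A minimal sketch in Java, assuming a stock HBase/Hadoop Configuration; the 30 second value is an arbitrary illustration, not a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
        public static Configuration buildConf() {
            // Standard HBase configuration (hbase-default.xml plus hbase-site.xml overrides).
            Configuration conf = HBaseConfiguration.create();
            // Give the async WAL writer more time to close before the shutdown thread
            // logs the "We have waited N seconds" error seen above. The log implies a
            // 5 second wait in this run; 30 here is only an example value.
            conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
            return conf;
        }
    }

In this particular run a longer wait would likely only delay the same outcome: the lease-recovery attempts keep failing with "java.io.IOException: Filesystem closed", i.e. the shared DFSClient was shut down before the WAL writer finished closing, so the close cannot complete regardless of the timeout.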
2024-12-13T01:06:10,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:06:10,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38293-0x10088dfff310000, quorum=127.0.0.1:49424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T01:06:10,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@167bbb68{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T01:06:10,397 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e46a01f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:06:10,397 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:06:10,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1adb2d24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T01:06:10,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@687e9e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:06:10,399 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-13T01:06:10,399 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T01:06:10,399 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T01:06:10,399 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1041131206-172.17.0.2-1734051379265 (Datanode Uuid 2ce12a41-d513-4135-a1ea-08b97fb0db53) service to localhost/127.0.0.1:46775 2024-12-13T01:06:10,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data5/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data6/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,401 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T01:06:10,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30330659{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T01:06:10,403 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49e47e93{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:06:10,404 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:06:10,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5abba5e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T01:06:10,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26ed28d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:06:10,408 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T01:06:10,408 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1041131206-172.17.0.2-1734051379265 (Datanode Uuid b510152c-04ee-4459-a0d2-f69dbaee7234) service to localhost/127.0.0.1:46775 2024-12-13T01:06:10,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-13T01:06:10,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T01:06:10,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data3/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data4/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T01:06:10,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@264b92dd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T01:06:10,412 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@499f7acb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:06:10,412 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:06:10,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f32c184{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T01:06:10,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@669430a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:06:10,413 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T01:06:10,413 WARN [BP-1041131206-172.17.0.2-1734051379265 heartbeating to localhost/127.0.0.1:46775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1041131206-172.17.0.2-1734051379265 (Datanode Uuid 11136e46-9b18-4895-bffb-608428d5bcef) service to localhost/127.0.0.1:46775 2024-12-13T01:06:10,413 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-13T01:06:10,413 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T01:06:10,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data1/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/cluster_48f3b076-4591-d7ef-18ce-83e6dcc74446/dfs/data/data2/current/BP-1041131206-172.17.0.2-1734051379265 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T01:06:10,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T01:06:10,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@570847c1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T01:06:10,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5affd01d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T01:06:10,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T01:06:10,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71fea0d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T01:06:10,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33a7d9f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bd782a99-79b2-3f45-d14e-66c4453b187e/hadoop.log.dir/,STOPPED} 2024-12-13T01:06:10,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-13T01:06:10,694 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
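The closing lines (datanode and namenode web contexts stopped, "Shutdown MiniZK cluster with all ZK servers", "Minicluster is down") are the normal teardown of an HBase minicluster-based test. For reference, a minimal sketch of the scaffolding that produces this kind of start/stop logging, using the HBaseTestingUtility helper referenced at HBaseTestingUtility(1347) above; the wrapper class and the placement of the test body are hypothetical:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterLifecycleExample {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            // Starts a MiniDFSCluster, a MiniZooKeeperCluster and an HBase master plus regionserver(s).
            util.startMiniCluster();
            try {
                // Test body would run against util.getConnection() here.
            } finally {
                // Tears everything down and ends with the "Minicluster is down" log line.
                util.shutdownMiniCluster();
            }
        }
    }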