2024-11-14 21:51:53,721 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4e5ed836 2024-11-14 21:51:53,744 main DEBUG Took 0.020163 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 21:51:53,745 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 21:51:53,746 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 21:51:53,747 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 21:51:53,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,771 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 21:51:53,787 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,790 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,791 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,792 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,793 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,794 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,795 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,795 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,803 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,803 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,804 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,805 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 21:51:53,805 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,806 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,806 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,807 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,807 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,808 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,808 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,810 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,810 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,811 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 21:51:53,811 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,812 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 21:51:53,814 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 21:51:53,816 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 21:51:53,819 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 21:51:53,819 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-14 21:51:53,821 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 21:51:53,821 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 21:51:53,833 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 21:51:53,836 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 21:51:53,838 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 21:51:53,839 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 21:51:53,839 main DEBUG createAppenders(={Console}) 2024-11-14 21:51:53,840 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4e5ed836 initialized 2024-11-14 21:51:53,841 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4e5ed836 2024-11-14 21:51:53,841 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4e5ed836 OK. 2024-11-14 21:51:53,842 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 21:51:53,842 main DEBUG OutputStream closed 2024-11-14 21:51:53,842 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 21:51:53,842 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 21:51:53,843 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@f5acb9d OK 2024-11-14 21:51:53,985 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 21:51:53,988 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 21:51:53,989 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 21:51:53,990 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 21:51:53,991 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 21:51:53,992 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 21:51:53,992 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 21:51:53,992 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 21:51:53,993 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 21:51:53,993 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 21:51:53,994 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 21:51:53,994 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 21:51:53,994 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 21:51:53,995 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 21:51:53,995 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 21:51:53,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 21:51:53,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 21:51:53,997 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 21:51:54,000 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 21:51:54,001 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@ab7395e) with optional ClassLoader: null 2024-11-14 21:51:54,001 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 21:51:54,002 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@ab7395e] started OK. 2024-11-14T21:51:54,023 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty timeout: 13 mins 2024-11-14 21:51:54,027 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 21:51:54,028 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-14T21:51:54,096 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.TestRegionServerReportForDuty#testReportForDutyWithRSRpcRetry Thread=11, OpenFileDescriptor=285, MaxFileDescriptor=1048576, SystemLoadAverage=785, ProcessCount=11, AvailableMemoryMB=3589 2024-11-14T21:51:54,715 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607 2024-11-14T21:51:54,740 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2, deleteOnExit=true 2024-11-14T21:51:54,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/test.cache.data in system properties and HBase conf 2024-11-14T21:51:54,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T21:51:54,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir in system properties and HBase conf 2024-11-14T21:51:54,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T21:51:54,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T21:51:54,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T21:51:54,911 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T21:51:55,029 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T21:51:55,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:51:55,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:51:55,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T21:51:55,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:51:55,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T21:51:55,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T21:51:55,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:51:55,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:51:55,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T21:51:55,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/nfs.dump.dir in system properties and HBase conf 2024-11-14T21:51:55,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/java.io.tmpdir in system properties and HBase conf 2024-11-14T21:51:55,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:51:55,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T21:51:55,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T21:51:56,220 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T21:51:56,364 INFO [Time-limited test {}] log.Log(170): Logging initialized @3803ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T21:51:56,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:51:56,605 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:51:56,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:51:56,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:51:56,658 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:51:56,681 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:51:56,688 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@323438b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:51:56,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@757ebaf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:51:56,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70725491{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/java.io.tmpdir/jetty-localhost-33317-hadoop-hdfs-3_4_1-tests_jar-_-any-3438139767787285819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:51:56,993 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13e3fa63{HTTP/1.1, (http/1.1)}{localhost:33317} 2024-11-14T21:51:56,994 INFO [Time-limited test {}] server.Server(415): Started @4435ms 2024-11-14T21:51:57,588 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:51:57,599 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:51:57,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:51:57,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:51:57,602 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:51:57,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2db0d459{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:51:57,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5786f013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:51:57,772 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a7824a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/java.io.tmpdir/jetty-localhost-34199-hadoop-hdfs-3_4_1-tests_jar-_-any-3040395510510303544/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:51:57,773 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@b9beb2c{HTTP/1.1, (http/1.1)}{localhost:34199} 2024-11-14T21:51:57,773 INFO [Time-limited test {}] server.Server(415): Started @5214ms 2024-11-14T21:51:57,842 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T21:51:58,506 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/data/data2/current/BP-1239955793-172.17.0.2-1731621115837/current, will proceed with Du for space computation calculation, 2024-11-14T21:51:58,506 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/data/data1/current/BP-1239955793-172.17.0.2-1731621115837/current, will proceed with Du for space computation calculation, 2024-11-14T21:51:58,657 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T21:51:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8acda42ceccf043 with lease ID 0x61ba87364aa42bd6: Processing first storage report for DS-81490912-7394-45a2-b5f5-575a450b3a00 from datanode DatanodeRegistration(127.0.0.1:35747, datanodeUuid=e11a5eb3-c5ea-4473-b4c4-030b79c48af2, infoPort=44917, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=1647608981;c=1731621115837) 2024-11-14T21:51:58,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8acda42ceccf043 with lease ID 0x61ba87364aa42bd6: from storage DS-81490912-7394-45a2-b5f5-575a450b3a00 node DatanodeRegistration(127.0.0.1:35747, datanodeUuid=e11a5eb3-c5ea-4473-b4c4-030b79c48af2, infoPort=44917, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=1647608981;c=1731621115837), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-14T21:51:58,772 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8acda42ceccf043 with lease ID 0x61ba87364aa42bd6: Processing first storage report for DS-79bf6f8a-6a1f-4c7b-8fa8-c0f169e3bed5 from datanode DatanodeRegistration(127.0.0.1:35747, datanodeUuid=e11a5eb3-c5ea-4473-b4c4-030b79c48af2, infoPort=44917, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=1647608981;c=1731621115837) 2024-11-14T21:51:58,772 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8acda42ceccf043 with lease ID 0x61ba87364aa42bd6: from storage DS-79bf6f8a-6a1f-4c7b-8fa8-c0f169e3bed5 node DatanodeRegistration(127.0.0.1:35747, datanodeUuid=e11a5eb3-c5ea-4473-b4c4-030b79c48af2, infoPort=44917, infoSecurePort=0, ipcPort=38563, storageInfo=lv=-57;cid=testClusterID;nsid=1647608981;c=1731621115837), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:51:58,787 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607 
2024-11-14T21:51:58,894 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/zookeeper_0, clientPort=55688, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T21:51:58,905 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55688 2024-11-14T21:51:58,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:51:58,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:51:59,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741825_1001 (size=7) 2024-11-14T21:51:59,719 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc with version=8 2024-11-14T21:51:59,855 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T21:52:00,190 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:61512 server-side Connection retries=45 2024-11-14T21:52:00,205 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,206 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,225 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:00,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:00,409 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:52:00,506 INFO 
[Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T21:52:00,517 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T21:52:00,523 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:52:00,558 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 30584 (auto-detected) 2024-11-14T21:52:00,560 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T21:52:00,579 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:61512 2024-11-14T21:52:00,604 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:61512 connecting to ZooKeeper ensemble=127.0.0.1:55688 2024-11-14T21:52:00,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:615120x0, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:00,644 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:61512-0x100ac6cf3b80000 connected 2024-11-14T21:52:00,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:00,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:00,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:00,712 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc, hbase.cluster.distributed=false 2024-11-14T21:52:00,741 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:00,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=61512 2024-11-14T21:52:00,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=61512 2024-11-14T21:52:00,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=61512 2024-11-14T21:52:00,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=61512 2024-11-14T21:52:00,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=61512 2024-11-14T21:52:00,874 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:52:00,877 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,878 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,878 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:00,881 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:00,882 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:00,891 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:52:00,894 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:52:00,902 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42339 2024-11-14T21:52:00,904 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42339 connecting to ZooKeeper ensemble=127.0.0.1:55688 2024-11-14T21:52:00,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:00,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:00,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423390x0, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:00,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423390x0, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:00,939 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:52:00,939 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42339-0x100ac6cf3b80001 connected 2024-11-14T21:52:00,962 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:52:00,967 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:52:00,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:00,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42339 2024-11-14T21:52:01,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42339 2024-11-14T21:52:01,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42339 2024-11-14T21:52:01,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42339 2024-11-14T21:52:01,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42339 2024-11-14T21:52:01,027 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(238): Starting master: ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,055 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef788a4cade8:61512 2024-11-14T21:52:01,057 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:01,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:01,079 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:01,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:01,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:01,135 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:52:01,137 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,61512,1731621119934 from backup master directory 2024-11-14T21:52:01,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:01,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:01,144 WARN [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:52:01,145 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,147 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T21:52:01,151 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T21:52:01,234 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/hbase.id] with ID: d3275003-db2e-4dfa-a218-bf99a1ffe399 2024-11-14T21:52:01,235 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/.tmp/hbase.id 2024-11-14T21:52:01,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741826_1002 (size=42) 2024-11-14T21:52:01,278 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/.tmp/hbase.id]:[hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/hbase.id] 2024-11-14T21:52:01,339 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:01,346 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:52:01,373 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 
2024-11-14T21:52:01,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:01,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741827_1003 (size=196) 2024-11-14T21:52:01,427 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:52:01,430 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:52:01,449 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] 
at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:01,454 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:01,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741828_1004 (size=1189) 2024-11-14T21:52:01,535 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:01,917 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store 2024-11-14T21:52:01,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741829_1005 (size=34) 
2024-11-14T21:52:01,953 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T21:52:01,958 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:01,960 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:52:01,961 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:01,961 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:01,963 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:52:01,964 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:01,964 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:01,966 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621121960Disabling compacts and flushes for region at 1731621121960Disabling writes for close at 1731621121963 (+3 ms)Writing region close event to WAL at 1731621121964 (+1 ms)Closed at 1731621121964 2024-11-14T21:52:01,969 WARN [master/ef788a4cade8:61512:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/.initializing 2024-11-14T21:52:01,969 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/WALs/ef788a4cade8,61512,1731621119934 2024-11-14T21:52:01,981 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:52:02,003 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C61512%2C1731621119934, suffix=, logDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/WALs/ef788a4cade8,61512,1731621119934, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/oldWALs, maxLogs=10 2024-11-14T21:52:02,031 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/WALs/ef788a4cade8,61512,1731621119934/ef788a4cade8%2C61512%2C1731621119934.1731621122009, exclude list is [], retry=0 
2024-11-14T21:52:02,035 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:02,048 DEBUG [HRegionServer-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35747,DS-81490912-7394-45a2-b5f5-575a450b3a00,DISK] 2024-11-14T21:52:02,052 DEBUG [HRegionServer-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-14T21:52:02,089 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/WALs/ef788a4cade8,61512,1731621119934/ef788a4cade8%2C61512%2C1731621119934.1731621122009 2024-11-14T21:52:02,090 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-14T21:52:02,091 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:52:02,091 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:02,095 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,096 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:52:02,169 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:02,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:02,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:52:02,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:02,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:02,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:52:02,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:02,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:02,183 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:52:02,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:02,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:02,187 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,190 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,192 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,197 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,197 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,201 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
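The repeated CompactionConfiguration lines above carry the size-based selection parameters (minCompactSize 128 MB, ratio 1.2, 3 to 10 files per compaction). A simplified sketch of the ratio test those parameters feed into, not the actual ExploringCompactionPolicy code; the class and method names are made up for illustration.

    import java.util.List;

    public class RatioCheckSketch {
        static final double RATIO = 1.2;                 // "ratio 1.200000" in the log
        static final long MIN_COMPACT_SIZE = 128L << 20; // "minCompactSize:128 MB"

        // A file qualifies if it is small outright, or not too much larger
        // than the combined size of the other files in the candidate selection.
        static boolean qualifies(long fileSize, List<Long> otherSizes) {
            long sumOfOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
            return fileSize < MIN_COMPACT_SIZE || fileSize <= RATIO * sumOfOthers;
        }

        public static void main(String[] args) {
            // A 300 MB file next to two 200 MB files: 300 <= 1.2 * 400, so it qualifies.
            System.out.println(qualifies(300L << 20, List.of(200L << 20, 200L << 20)));
        }
    }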
2024-11-14T21:52:02,204 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:02,209 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:52:02,210 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64583115, jitterRate=-0.037636592984199524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:02,216 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621122109Initializing all the Stores at 1731621122111 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621122112 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621122112Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621122113 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621122113Cleaning up temporary data from old regions at 1731621122197 (+84 ms)Region opened successfully at 1731621122216 (+19 ms) 2024-11-14T21:52:02,217 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:52:02,252 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@677349b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:02,297 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
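The FlushLargeStoresPolicy fallback reported just above is easy to check against the other logged numbers: the master local region has four column families (info, proc, rs, state) and the flusher line reports flushSize=134217728, which divides out to the 33554432-byte (32 M) lower bound in the log.

    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long memstoreFlushSize = 134_217_728L; // "flushSize=134217728" (128 MB)
            int columnFamilies = 4;                // info, proc, rs and state
            // Prints 33554432, i.e. the logged flushSizeLowerBound (32.0 M).
            System.out.println(memstoreFlushSize / columnFamilies);
        }
    }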
2024-11-14T21:52:02,314 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:52:02,314 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:52:02,318 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:52:02,320 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T21:52:02,327 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-14T21:52:02,327 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T21:52:02,355 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T21:52:02,364 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:52:02,366 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:52:02,369 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:52:02,370 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:52:02,373 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:52:02,376 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:52:02,379 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:52:02,381 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:52:02,383 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:52:02,385 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:52:02,407 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] 
zookeeper.ZKUtil(444): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:52:02,409 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:52:02,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:02,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:02,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,417 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(746): ClusterId : d3275003-db2e-4dfa-a218-bf99a1ffe399 2024-11-14T21:52:02,417 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,61512,1731621119934, sessionid=0x100ac6cf3b80000, setting cluster-up flag (Was=false) 2024-11-14T21:52:02,420 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:52:02,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,437 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:52:02,441 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:52:02,441 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:52:02,441 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,61512,1731621119934 2024-11-14T21:52:02,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:02,455 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:52:02,457 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:52:02,458 DEBUG [RS:0;ef788a4cade8:42339 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@125ae636, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:02,458 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,61512,1731621119934 2024-11-14T21:52:02,465 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T21:52:02,473 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef788a4cade8:42339 2024-11-14T21:52:02,475 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:52:02,475 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T21:52:02,476 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T21:52:02,478 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,61512,1731621119934 with port=42339, startcode=1731621120809 2024-11-14T21:52:02,489 DEBUG [RS:0;ef788a4cade8:42339 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:52:02,535 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:02,535 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33157, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:52:02,546 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=61512 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:02,548 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:02,561 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:52:02,580 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T21:52:02,586 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:02,586 WARN [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
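The exchange above (ServerNotRunningYetException from regionServerStartup, then "reportForDuty failed; sleeping 100 ms and then retrying") is a plain retry loop on the region server side. A simplified sketch of that behaviour; MasterRpc and tryReportForDuty are hypothetical stand-ins for the real RegionServerStatusService RPC, not the actual HRegionServer code.

    public class ReportForDutyRetrySketch {
        interface MasterRpc { boolean tryReportForDuty() throws Exception; }

        static void reportUntilAccepted(MasterRpc rpc) throws InterruptedException {
            while (true) {
                try {
                    if (rpc.tryReportForDuty()) {
                        return; // master accepted the registration
                    }
                } catch (Exception e) {
                    // e.g. ServerNotRunningYetException surfaced by the RPC layer
                }
                // Matches the "sleeping 100 ms and then retrying" line in the log.
                Thread.sleep(100);
            }
        }
    }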
2024-11-14T21:52:02,588 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,61512,1731621119934 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:52:02,604 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:61512, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:02,604 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:61512, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:02,604 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:61512, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:02,605 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:61512, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:02,605 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:61512, corePoolSize=10, maxPoolSize=10 2024-11-14T21:52:02,605 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:61512, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,605 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:61512, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:02,605 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:61512, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,613 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:02,614 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T21:52:02,627 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621152627 2024-11-14T21:52:02,629 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:52:02,630 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:52:02,638 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:52:02,642 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:02,642 INFO 
[PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T21:52:02,638 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:52:02,644 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:52:02,644 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:52:02,646 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
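The hbase:meta descriptor printed above spells out each family's attributes. For an ordinary user table, the same attributes as the 'info' family would be expressed through the public descriptor builder API, as in the sketch below; the table name "demo" is hypothetical, and this is not how hbase:meta itself is created.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilySketch {
        public static void main(String[] args) {
            // Same attributes as the 'info' family in the logged descriptor.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                    // VERSIONS => '3'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                    .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                    .setInMemory(true)                                    // IN_MEMORY => 'true'
                    .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                    .build();
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo"))                // hypothetical table name
                    .setColumnFamily(info)
                    .build();
            System.out.println(td);
        }
    }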
2024-11-14T21:52:02,650 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:52:02,652 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:52:02,653 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:52:02,673 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:52:02,674 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:52:02,678 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.large.0-1731621122676,5,FailOnTimeoutGroup] 2024-11-14T21:52:02,679 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.small.0-1731621122678,5,FailOnTimeoutGroup] 2024-11-14T21:52:02,679 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,679 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:52:02,681 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,681 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
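The ChoreService lines register periodic cleaners with a period and a time unit (LogsCleaner and HFileCleaner every 600000 ms here). For illustration only, a plain java.util.concurrent equivalent of such a fixed-rate task; HBase's own ChoreService and ScheduledChore add missed-run accounting on top of this, and the class name is made up.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CleanerChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            // period=600000, unit=MILLISECONDS, as logged for LogsCleaner and HFileCleaner.
            scheduler.scheduleAtFixedRate(
                    () -> System.out.println("cleaner pass"),
                    0, 600_000, TimeUnit.MILLISECONDS);
            // Note: the scheduler thread is non-daemon, so a real caller would
            // eventually shut it down with scheduler.shutdown().
        }
    }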
2024-11-14T21:52:02,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741831_1007 (size=1321) 2024-11-14T21:52:02,688 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,61512,1731621119934 with port=42339, startcode=1731621120809 2024-11-14T21:52:02,695 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=61512 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,698 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=61512 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,708 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc 2024-11-14T21:52:02,708 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45089 2024-11-14T21:52:02,709 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T21:52:02,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:02,716 DEBUG [RS:0;ef788a4cade8:42339 {}] zookeeper.ZKUtil(111): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,717 WARN [RS:0;ef788a4cade8:42339 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:52:02,717 INFO [RS:0;ef788a4cade8:42339 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:02,717 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,722 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef788a4cade8,42339,1731621120809] 2024-11-14T21:52:02,744 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T21:52:02,759 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T21:52:02,766 INFO [RS:0;ef788a4cade8:42339 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T21:52:02,766 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
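The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those two numbers are consistent with the default lower-limit fraction of 0.95 (hbase.regionserver.global.memstore.size.lower.limit); the fraction itself is an assumption here, since it is not printed in the log.

    public class MemStoreLowMarkCheck {
        public static void main(String[] args) {
            double limitMb = 880;                 // globalMemStoreLimit=880 M
            double lowerLimitFraction = 0.95;     // assumed default, not shown in the log
            // Prints 836, matching the logged globalMemStoreLimitLowMark of 836 M.
            System.out.println(Math.round(limitMb * lowerLimitFraction));
        }
    }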
2024-11-14T21:52:02,768 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T21:52:02,776 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T21:52:02,778 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,779 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,779 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,779 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,779 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,780 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,780 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:02,780 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,780 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,781 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,781 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,781 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,781 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:02,782 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:02,782 DEBUG [RS:0;ef788a4cade8:42339 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:02,787 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
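The executor lines above (and the earlier master-side ones) expose three knobs per pool: corePoolSize, maxPoolSize and, for some pools, allowCoreThreadTimeOut. A plain java.util.concurrent illustration of what those knobs mean, using the corePoolSize=3/maxPoolSize=3 values logged for the snapshot and flush pools; HBase wraps this in its own executor.ExecutorService class, so this is not the actual implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedPoolSketch {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    3, 3,                         // corePoolSize=3, maxPoolSize=3
                    60, TimeUnit.SECONDS,         // keep-alive for idle threads
                    new LinkedBlockingQueue<>()); // unbounded work queue
            pool.allowCoreThreadTimeOut(true);    // as logged for some of the pools
            pool.execute(() ->
                    System.out.println("task ran on " + Thread.currentThread().getName()));
            pool.shutdown();
        }
    }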
2024-11-14T21:52:02,790 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,791 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,791 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,791 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,791 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,42339,1731621120809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:52:02,816 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T21:52:02,819 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,42339,1731621120809-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,819 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,819 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.Replication(171): ef788a4cade8,42339,1731621120809 started 2024-11-14T21:52:02,846 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:02,847 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1482): Serving as ef788a4cade8,42339,1731621120809, RpcServer on ef788a4cade8/172.17.0.2:42339, sessionid=0x100ac6cf3b80001 2024-11-14T21:52:02,847 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T21:52:02,848 DEBUG [RS:0;ef788a4cade8:42339 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,848 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,42339,1731621120809' 2024-11-14T21:52:02,848 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T21:52:02,849 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T21:52:02,850 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T21:52:02,850 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T21:52:02,850 DEBUG [RS:0;ef788a4cade8:42339 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef788a4cade8,42339,1731621120809 2024-11-14T21:52:02,850 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,42339,1731621120809' 2024-11-14T21:52:02,850 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T21:52:02,851 DEBUG 
[RS:0;ef788a4cade8:42339 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T21:52:02,852 DEBUG [RS:0;ef788a4cade8:42339 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T21:52:02,852 INFO [RS:0;ef788a4cade8:42339 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T21:52:02,852 INFO [RS:0;ef788a4cade8:42339 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T21:52:02,957 INFO [RS:0;ef788a4cade8:42339 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:52:02,961 INFO [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C42339%2C1731621120809, suffix=, logDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/oldWALs, maxLogs=32 2024-11-14T21:52:02,981 DEBUG [RS:0;ef788a4cade8:42339 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809/ef788a4cade8%2C42339%2C1731621120809.1731621122964, exclude list is [], retry=0 2024-11-14T21:52:02,986 DEBUG [HRegionServer-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35747,DS-81490912-7394-45a2-b5f5-575a450b3a00,DISK] 2024-11-14T21:52:02,990 INFO [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809/ef788a4cade8%2C42339%2C1731621120809.1731621122964 2024-11-14T21:52:02,991 DEBUG [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-14T21:52:03,036 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
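Both WAL configuration lines in this section report blocksize=256 MB and rollsize=128 MB, which matches the default log-roll multiplier of 0.5 (hbase.regionserver.logroll.multiplier). The multiplier is an assumption, since it is not printed in the log; the sketch below only checks the arithmetic.

    public class WalRollSizeCheck {
        public static void main(String[] args) {
            long blockSizeBytes = 256L * 1024 * 1024; // blocksize=256 MB as logged
            double rollMultiplier = 0.5;              // assumed default multiplier
            long rollSizeMb = Math.round(blockSizeBytes * rollMultiplier) / (1024 * 1024);
            System.out.println(rollSizeMb + " MB");   // 128 MB, matching the logged rollsize
        }
    }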
2024-11-14T21:52:03,086 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T21:52:03,086 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc 2024-11-14T21:52:03,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741833_1009 (size=32) 2024-11-14T21:52:03,501 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:03,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:52:03,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:52:03,507 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:52:03,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:52:03,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:52:03,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:52:03,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-11-14T21:52:03,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:52:03,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:52:03,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:52:03,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740 2024-11-14T21:52:03,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740 2024-11-14T21:52:03,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:52:03,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:52:03,523 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-14T21:52:03,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:52:03,529 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:52:03,530 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74659697, jitterRate=0.11251617968082428}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:03,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731621123501Initializing all the Stores at 1731621123503 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123503Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123504 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621123504Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123504Cleaning up temporary data from old regions at 1731621123522 (+18 ms)Region opened successfully at 1731621123532 (+10 ms) 2024-11-14T21:52:03,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:52:03,533 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:52:03,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:52:03,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:52:03,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:52:03,534 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:03,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621123533Disabling compacts and flushes for region at 1731621123533Disabling writes for close at 
1731621123533Writing region close event to WAL at 1731621123534 (+1 ms)Closed at 1731621123534 2024-11-14T21:52:03,536 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:03,538 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:03,538 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T21:52:03,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T21:52:03,553 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:52:03,557 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T21:52:03,710 DEBUG [ef788a4cade8:61512 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T21:52:03,722 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef788a4cade8,42339,1731621120809 2024-11-14T21:52:03,728 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,42339,1731621120809, state=OPENING 2024-11-14T21:52:03,733 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T21:52:03,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:03,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:03,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:03,740 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:03,741 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:52:03,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,42339,1731621120809}] 2024-11-14T21:52:03,921 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 
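The two split-policy lines in this section (desiredMaxFileSize=64583115 with jitterRate=-0.0376... for the master local store earlier, and desiredMaxFileSize=74659697 with jitterRate=0.1125... for hbase:meta just above) are both consistent, up to rounding, with a 64 MB base size multiplied by (1 + jitterRate). The 64 MB base is inferred from that arithmetic and is not logged directly.

    public class SplitSizeJitterCheck {
        public static void main(String[] args) {
            long base = 64L * 1024 * 1024; // 67108864 bytes, inferred base max file size
            // About 64583115, the master local store value logged earlier.
            System.out.println(Math.round(base * (1 - 0.037636592984199524)));
            // About 74659697, the hbase:meta value logged just above.
            System.out.println(Math.round(base * (1 + 0.11251617968082428)));
        }
    }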
2024-11-14T21:52:03,925 INFO [HRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51669, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T21:52:03,936 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T21:52:03,937 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:03,937 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-14T21:52:03,941 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C42339%2C1731621120809.meta, suffix=.meta, logDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/oldWALs, maxLogs=32 2024-11-14T21:52:03,957 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809/ef788a4cade8%2C42339%2C1731621120809.meta.1731621123942.meta, exclude list is [], retry=0 2024-11-14T21:52:03,960 DEBUG [HRegionServer-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35747,DS-81490912-7394-45a2-b5f5-575a450b3a00,DISK] 2024-11-14T21:52:03,964 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/WALs/ef788a4cade8,42339,1731621120809/ef788a4cade8%2C42339%2C1731621120809.meta.1731621123942.meta 2024-11-14T21:52:03,964 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-14T21:52:03,964 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:52:03,966 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T21:52:03,969 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T21:52:03,974 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T21:52:03,978 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T21:52:03,978 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:03,979 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T21:52:03,979 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T21:52:03,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:52:03,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:52:03,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:52:03,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:52:03,987 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:52:03,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:52:03,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:03,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:52:03,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:52:03,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:03,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T21:52:03,994 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:52:03,995 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740 2024-11-14T21:52:03,998 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740 2024-11-14T21:52:04,000 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:52:04,000 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:52:04,001 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:52:04,003 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:52:04,005 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68639621, jitterRate=0.022810056805610657}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:04,005 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T21:52:04,007 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731621123979Writing region info on filesystem at 1731621123979Initializing all the Stores at 1731621123981 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123981Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123982 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621123982Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621123982Cleaning up temporary data from old regions at 1731621124000 (+18 ms)Running coprocessor post-open hooks at 1731621124005 (+5 ms)Region opened successfully at 1731621124006 (+1 ms) 2024-11-14T21:52:04,013 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731621123909 2024-11-14T21:52:04,026 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T21:52:04,026 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T21:52:04,028 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef788a4cade8,42339,1731621120809 2024-11-14T21:52:04,031 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,42339,1731621120809, state=OPEN 2024-11-14T21:52:04,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:04,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:04,034 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:04,034 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:04,034 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef788a4cade8,42339,1731621120809 2024-11-14T21:52:04,036 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:52:04,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T21:52:04,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,42339,1731621120809 in 292 msec 2024-11-14T21:52:04,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T21:52:04,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 497 msec 2024-11-14T21:52:04,047 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:04,048 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T21:52:04,068 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T21:52:04,070 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef788a4cade8,42339,1731621120809, seqNum=-1] 2024-11-14T21:52:04,092 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T21:52:04,095 INFO [HRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47237, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T21:52:04,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6200 sec 2024-11-14T21:52:04,118 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731621124118, completionTime=-1 2024-11-14T21:52:04,121 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T21:52:04,121 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-14T21:52:04,150 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T21:52:04,150 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731621184150 2024-11-14T21:52:04,151 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731621244150 2024-11-14T21:52:04,151 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 29 msec 2024-11-14T21:52:04,154 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,154 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,154 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,156 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef788a4cade8:61512, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,157 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,157 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,164 DEBUG [master/ef788a4cade8:61512.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T21:52:04,187 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.042sec 2024-11-14T21:52:04,189 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T21:52:04,190 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T21:52:04,191 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T21:52:04,192 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T21:52:04,192 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T21:52:04,193 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-14T21:52:04,194 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T21:52:04,203 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T21:52:04,204 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T21:52:04,204 INFO [master/ef788a4cade8:61512:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,61512,1731621119934-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:04,537 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:04,537 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T21:52:04,537 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=519455368, stopped=false 2024-11-14T21:52:04,538 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef788a4cade8,61512,1731621119934 2024-11-14T21:52:04,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:04,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:04,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:04,540 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:52:04,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:04,541 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T21:52:04,541 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:91)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T21:52:04,541 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T21:52:04,541 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T21:52:04,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T21:52:04,548 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,42339,1731621120809' *****
2024-11-14T21:52:04,548 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-14T21:52:04,548 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-14T21:52:04,549 INFO [RS:0;ef788a4cade8:42339 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-14T21:52:04,549 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-14T21:52:04,549 INFO [RS:0;ef788a4cade8:42339 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-14T21:52:04,549 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,42339,1731621120809
2024-11-14T21:52:04,549 INFO [RS:0;ef788a4cade8:42339 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-14T21:52:04,550 INFO [RS:0;ef788a4cade8:42339 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef788a4cade8:42339.
2024-11-14T21:52:04,550 DEBUG [RS:0;ef788a4cade8:42339 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T21:52:04,550 DEBUG [RS:0;ef788a4cade8:42339 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T21:52:04,550 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-14T21:52:04,550 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-14T21:52:04,550 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-14T21:52:04,550 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T21:52:04,551 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T21:52:04,551 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T21:52:04,552 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:52:04,552 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:52:04,552 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:52:04,552 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:52:04,552 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:52:04,552 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:52:04,556 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T21:52:04,593 INFO [regionserver/ef788a4cade8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:04,621 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/.tmp/ns/f23fa2edcac64f3bb8c7365919d22a82 is 43, key is default/ns:d/1731621124101/Put/seqid=0 2024-11-14T21:52:04,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741835_1011 (size=5153) 2024-11-14T21:52:04,752 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:52:04,790 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T21:52:04,791 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T21:52:04,953 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:52:05,035 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/.tmp/ns/f23fa2edcac64f3bb8c7365919d22a82 2024-11-14T21:52:05,153 DEBUG [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:52:05,156 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/.tmp/ns/f23fa2edcac64f3bb8c7365919d22a82 as hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/ns/f23fa2edcac64f3bb8c7365919d22a82 2024-11-14T21:52:05,172 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/ns/f23fa2edcac64f3bb8c7365919d22a82, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T21:52:05,180 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 623ms, sequenceid=6, compaction requested=false 2024-11-14T21:52:05,181 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T21:52:05,195 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T21:52:05,197 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T21:52:05,197 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:05,197 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621124551Running coprocessor pre-close hooks at 1731621124552 (+1 ms)Disabling compacts and flushes for region at 1731621124552Disabling writes for close at 1731621124552Obtaining lock to block concurrent updates at 1731621124557 (+5 ms)Preparing flush snapshotting stores in 1588230740 at 1731621124557Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731621124565 (+8 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731621124567 (+2 ms)Flushing 1588230740/ns: creating writer at 1731621124568 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731621124611 (+43 ms)Flushing 1588230740/ns: closing flushed file at 1731621124614 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@155d2421: reopening flushed file at 1731621125153 (+539 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 623ms, sequenceid=6, compaction requested=false at 1731621125180 (+27 ms)Writing region close event to WAL at 1731621125186 (+6 ms)Running coprocessor post-close hooks at 1731621125197 (+11 ms)Closed at 1731621125197 2024-11-14T21:52:05,198 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:05,353 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,42339,1731621120809; all regions closed. 
2024-11-14T21:52:05,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741834_1010 (size=1152) 2024-11-14T21:52:05,369 DEBUG [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/oldWALs 2024-11-14T21:52:05,369 INFO [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C42339%2C1731621120809.meta:.meta(num 1731621123942) 2024-11-14T21:52:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741832_1008 (size=93) 2024-11-14T21:52:05,381 DEBUG [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/oldWALs 2024-11-14T21:52:05,381 INFO [RS:0;ef788a4cade8:42339 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C42339%2C1731621120809:(num 1731621122964) 2024-11-14T21:52:05,381 DEBUG [RS:0;ef788a4cade8:42339 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:05,381 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:05,381 INFO [RS:0;ef788a4cade8:42339 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:52:05,382 INFO [RS:0;ef788a4cade8:42339 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T21:52:05,382 INFO [RS:0;ef788a4cade8:42339 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:52:05,383 INFO [RS:0;ef788a4cade8:42339 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42339 2024-11-14T21:52:05,385 INFO [regionserver/ef788a4cade8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T21:52:05,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:05,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef788a4cade8,42339,1731621120809 2024-11-14T21:52:05,390 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef788a4cade8,42339,1731621120809] 2024-11-14T21:52:05,390 INFO [RS:0;ef788a4cade8:42339 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:52:05,392 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef788a4cade8,42339,1731621120809 already deleted, retry=false 2024-11-14T21:52:05,393 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef788a4cade8,42339,1731621120809 expired; onlineServers=0 2024-11-14T21:52:05,393 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,61512,1731621119934' ***** 2024-11-14T21:52:05,393 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T21:52:05,393 INFO [M:0;ef788a4cade8:61512 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:52:05,393 INFO [M:0;ef788a4cade8:61512 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:52:05,393 DEBUG [M:0;ef788a4cade8:61512 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T21:52:05,394 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T21:52:05,394 DEBUG [M:0;ef788a4cade8:61512 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T21:52:05,394 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.small.0-1731621122678 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.small.0-1731621122678,5,FailOnTimeoutGroup] 2024-11-14T21:52:05,394 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.large.0-1731621122676 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:61512:becomeActiveMaster-HFileCleaner.large.0-1731621122676,5,FailOnTimeoutGroup] 2024-11-14T21:52:05,394 INFO [M:0;ef788a4cade8:61512 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:61512 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T21:52:05,394 INFO [M:0;ef788a4cade8:61512 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:52:05,394 DEBUG [M:0;ef788a4cade8:61512 {}] master.HMaster(1795): Stopping service threads 2024-11-14T21:52:05,395 INFO [M:0;ef788a4cade8:61512 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T21:52:05,395 INFO [M:0;ef788a4cade8:61512 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:52:05,395 INFO [M:0;ef788a4cade8:61512 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T21:52:05,396 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T21:52:05,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:05,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:05,402 DEBUG [M:0;ef788a4cade8:61512 {}] zookeeper.ZKUtil(347): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T21:52:05,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:05,403 WARN [M:0;ef788a4cade8:61512 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T21:52:05,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:05,405 INFO [M:0;ef788a4cade8:61512 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/.lastflushedseqids 2024-11-14T21:52:05,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741836_1012 (size=99) 2024-11-14T21:52:05,428 INFO [M:0;ef788a4cade8:61512 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:52:05,428 INFO [M:0;ef788a4cade8:61512 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:52:05,428 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:52:05,429 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:05,429 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:05,429 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:52:05,429 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:52:05,429 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.53 KB heapSize=11.13 KB 2024-11-14T21:52:05,452 DEBUG [M:0;ef788a4cade8:61512 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bac4670f885455fbb7bd6836d82417e is 82, key is hbase:meta,,1/info:regioninfo/1731621124028/Put/seqid=0 2024-11-14T21:52:05,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741837_1013 (size=5672) 2024-11-14T21:52:05,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:05,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42339-0x100ac6cf3b80001, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:05,504 INFO [RS:0;ef788a4cade8:42339 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:52:05,504 INFO [RS:0;ef788a4cade8:42339 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,42339,1731621120809; zookeeper connection closed. 2024-11-14T21:52:05,505 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T21:52:05,860 INFO [M:0;ef788a4cade8:61512 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bac4670f885455fbb7bd6836d82417e 2024-11-14T21:52:05,893 DEBUG [M:0;ef788a4cade8:61512 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c03656e736274ba8a241cf57192fcc5b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731621124116/Put/seqid=0 2024-11-14T21:52:05,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741838_1014 (size=5275) 2024-11-14T21:52:06,318 INFO [M:0;ef788a4cade8:61512 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.98 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c03656e736274ba8a241cf57192fcc5b 2024-11-14T21:52:06,353 DEBUG [M:0;ef788a4cade8:61512 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/220bc9fb48c2437192e098e97fb4df12 is 69, key is ef788a4cade8,42339,1731621120809/rs:state/1731621122701/Put/seqid=0 2024-11-14T21:52:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741839_1015 (size=5156) 2024-11-14T21:52:06,775 INFO [M:0;ef788a4cade8:61512 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/220bc9fb48c2437192e098e97fb4df12 2024-11-14T21:52:06,789 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bac4670f885455fbb7bd6836d82417e as hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4bac4670f885455fbb7bd6836d82417e 2024-11-14T21:52:06,801 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4bac4670f885455fbb7bd6836d82417e, entries=8, sequenceid=28, filesize=5.5 K 2024-11-14T21:52:06,803 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c03656e736274ba8a241cf57192fcc5b as hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c03656e736274ba8a241cf57192fcc5b 2024-11-14T21:52:06,813 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c03656e736274ba8a241cf57192fcc5b, entries=3, sequenceid=28, filesize=5.2 K 2024-11-14T21:52:06,816 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/220bc9fb48c2437192e098e97fb4df12 as hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/220bc9fb48c2437192e098e97fb4df12 2024-11-14T21:52:06,825 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/d2cf36fe-e8a1-d9af-fde9-1a9bbb9106bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/220bc9fb48c2437192e098e97fb4df12, entries=1, sequenceid=28, filesize=5.0 K 2024-11-14T21:52:06,827 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1398ms, sequenceid=28, compaction requested=false 2024-11-14T21:52:06,830 INFO [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:52:06,830 DEBUG [M:0;ef788a4cade8:61512 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621125428Disabling compacts and flushes for region at 1731621125428Disabling writes for close at 1731621125429 (+1 ms)Obtaining lock to block concurrent updates at 1731621125429Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621125429Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7712, getHeapSize=11328, getOffHeapSize=0, getCellsCount=35 at 1731621125430 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731621125432 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731621125432Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731621125451 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731621125451Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731621125871 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731621125892 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731621125893 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731621126332 (+439 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731621126352 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731621126353 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@361b8962: reopening flushed file at 1731621126787 (+434 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67cc2488: reopening flushed file at 1731621126801 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@652c352: reopening flushed file at 1731621126814 (+13 ms)Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1398ms, sequenceid=28, compaction requested=false at 1731621126828 (+14 ms)Writing region close event to WAL at 1731621126830 (+2 ms)Closed at 1731621126830 2024-11-14T21:52:06,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35747 is added to blk_1073741830_1006 (size=10075) 2024-11-14T21:52:06,835 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:52:06,835 INFO [M:0;ef788a4cade8:61512 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T21:52:06,836 INFO [M:0;ef788a4cade8:61512 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:61512 2024-11-14T21:52:06,836 INFO [M:0;ef788a4cade8:61512 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:52:06,940 INFO [M:0;ef788a4cade8:61512 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:52:06,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:06,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:61512-0x100ac6cf3b80000, quorum=127.0.0.1:55688, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:06,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T21:52:06,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a7824a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:52:06,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b9beb2c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:52:06,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:52:06,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5786f013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:52:06,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2db0d459{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir/,STOPPED} 2024-11-14T21:52:06,957 WARN [BP-1239955793-172.17.0.2-1731621115837 heartbeating to localhost/127.0.0.1:45089 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T21:52:06,957 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T21:52:06,957 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T21:52:06,957 WARN [BP-1239955793-172.17.0.2-1731621115837 heartbeating to localhost/127.0.0.1:45089 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1239955793-172.17.0.2-1731621115837 (Datanode Uuid e11a5eb3-c5ea-4473-b4c4-030b79c48af2) service to localhost/127.0.0.1:45089 2024-11-14T21:52:06,959 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/data/data1/current/BP-1239955793-172.17.0.2-1731621115837 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:52:06,959 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/cluster_1ed275ca-d3f5-08d0-75ee-63f8b9a2d0b2/data/data2/current/BP-1239955793-172.17.0.2-1731621115837 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:52:06,959 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T21:52:06,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70725491{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:52:06,971 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13e3fa63{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:52:06,971 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:52:06,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@757ebaf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:52:06,972 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@323438b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir/,STOPPED} 2024-11-14T21:52:06,984 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.TestRegionServerReportForDuty#testReportForDutyWithRSRpcRetry Thread=50 (was 11) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45089 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RSDelayedStart-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45089 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-3-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:45089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'NameNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:45089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: regionserver/ef788a4cade8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@d5c1683 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=332 (was 285) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=763 (was 785), ProcessCount=11 (was 11), AvailableMemoryMB=4112 (was 3589) - AvailableMemoryMB LEAK? - 2024-11-14T21:52:06,990 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.TestRegionServerReportForDuty#testReportForDutyWithMasterChange Thread=50, OpenFileDescriptor=332, MaxFileDescriptor=1048576, SystemLoadAverage=763, ProcessCount=11, AvailableMemoryMB=4112 2024-11-14T21:52:07,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir so I do NOT create it in target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe 2024-11-14T21:52:07,008 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.log.dir Erasing configuration value by system value. 2024-11-14T21:52:07,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.tmp.dir so I do NOT create it in target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe 2024-11-14T21:52:07,008 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e7af7e66-6ea9-7b13-a13c-77d3544fa607/hadoop.tmp.dir Erasing configuration value by system value. 
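The ResourceChecker summary above diffs resource counts around the test (Thread=50, was 11; OpenFileDescriptor=332, was 285) and lists each surviving stack as a "Potentially hanging thread". A minimal, plain-JDK illustration of that before/after idea (this is not HBase's ResourceChecker, just the general technique):

import java.util.HashSet;
import java.util.Set;

// Snapshots the live-thread set before and after a block of work and reports
// any threads that appeared and are still alive, mimicking the log format above.
public class ThreadLeakCheck {
  public static void main(String[] args) throws Exception {
    Set<Thread> before = new HashSet<>(Thread.getAllStackTraces().keySet());

    // Stand-in for "the test": start a thread that outlives the check.
    Thread lingering = new Thread(() -> {
      try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
    }, "demo-lingering-thread");
    lingering.start();

    Set<Thread> after = new HashSet<>(Thread.getAllStackTraces().keySet());
    Set<Thread> possiblyLeaked = new HashSet<>(after);
    possiblyLeaked.removeAll(before);

    System.out.println("Thread=" + after.size() + " (was " + before.size() + ")");
    for (Thread t : possiblyLeaked) {
      System.out.println("Potentially hanging thread: " + t.getName());
    }
    lingering.interrupt();
  }
}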
2024-11-14T21:52:07,008 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe 2024-11-14T21:52:07,008 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c, deleteOnExit=true 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/test.cache.data in system properties and HBase conf 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir in system properties and HBase conf 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T21:52:07,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T21:52:07,010 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T21:52:07,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:52:07,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:52:07,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:52:07,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T21:52:07,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/nfs.dump.dir in system properties and HBase conf 2024-11-14T21:52:07,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/java.io.tmpdir in system properties and HBase conf 2024-11-14T21:52:07,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:52:07,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T21:52:07,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T21:52:07,130 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:52:07,137 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:52:07,143 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:52:07,143 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:52:07,143 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:52:07,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:52:07,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@209802d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:52:07,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e3e5b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:52:07,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39f213d6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/java.io.tmpdir/jetty-localhost-41375-hadoop-hdfs-3_4_1-tests_jar-_-any-519050073847734866/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:52:07,279 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375} 2024-11-14T21:52:07,279 INFO [Time-limited test {}] server.Server(415): Started @14720ms 2024-11-14T21:52:07,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:52:07,391 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:52:07,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:52:07,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:52:07,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T21:52:07,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a9c440{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:52:07,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1873df08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:52:07,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46eae375{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/java.io.tmpdir/jetty-localhost-46177-hadoop-hdfs-3_4_1-tests_jar-_-any-7533496152153116826/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:52:07,557 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177} 2024-11-14T21:52:07,557 INFO [Time-limited test {}] server.Server(415): Started @14998ms 2024-11-14T21:52:07,559 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T21:52:07,678 WARN [Thread-213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055/current, will proceed with Du for space computation calculation, 2024-11-14T21:52:07,678 WARN [Thread-214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055/current, will proceed with Du for space computation calculation, 2024-11-14T21:52:07,708 WARN [Thread-201 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T21:52:07,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaef67ad99508ea3a with lease ID 0xbda73da053404dd5: Processing first storage report for DS-1aa688d2-9371-4fad-a950-f82e44c7b32b from datanode DatanodeRegistration(127.0.0.1:40079, datanodeUuid=6b441693-ca33-4907-897a-89104b4b6a9d, infoPort=45235, infoSecurePort=0, ipcPort=37595, storageInfo=lv=-57;cid=testClusterID;nsid=1428058585;c=1731621127055) 2024-11-14T21:52:07,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaef67ad99508ea3a with lease ID 0xbda73da053404dd5: from storage DS-1aa688d2-9371-4fad-a950-f82e44c7b32b node DatanodeRegistration(127.0.0.1:40079, datanodeUuid=6b441693-ca33-4907-897a-89104b4b6a9d, infoPort=45235, infoSecurePort=0, ipcPort=37595, storageInfo=lv=-57;cid=testClusterID;nsid=1428058585;c=1731621127055), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:52:07,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaef67ad99508ea3a with lease ID 0xbda73da053404dd5: Processing first storage report for DS-9121e25a-5688-42c2-a4ad-5eed8bda80a9 from datanode DatanodeRegistration(127.0.0.1:40079, datanodeUuid=6b441693-ca33-4907-897a-89104b4b6a9d, infoPort=45235, infoSecurePort=0, ipcPort=37595, storageInfo=lv=-57;cid=testClusterID;nsid=1428058585;c=1731621127055) 2024-11-14T21:52:07,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaef67ad99508ea3a with lease ID 0xbda73da053404dd5: from storage DS-9121e25a-5688-42c2-a4ad-5eed8bda80a9 node DatanodeRegistration(127.0.0.1:40079, datanodeUuid=6b441693-ca33-4907-897a-89104b4b6a9d, infoPort=45235, infoSecurePort=0, ipcPort=37595, storageInfo=lv=-57;cid=testClusterID;nsid=1428058585;c=1731621127055), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T21:52:07,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe 
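The HBaseTestingUtil record above and the property-setting records earlier in this run boil down to pointing hbase.rootdir and the various Hadoop directories at per-test paths on the shared Configuration. A tiny sketch of that kind of call, with hypothetical local paths standing in for the generated target/test-data directories:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TestDirConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical paths; the test uses generated target/test-data directories.
    conf.set("hbase.rootdir", "file:///tmp/hbase-test-rootdir");
    conf.set("hadoop.tmp.dir", "/tmp/hadoop-test-tmp");
    conf.set("hadoop.log.dir", "/tmp/hadoop-test-logs");
    System.out.println("hbase.rootdir = " + conf.get("hbase.rootdir"));
  }
}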
2024-11-14T21:52:07,806 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/zookeeper_0, clientPort=49891, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T21:52:07,807 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49891 2024-11-14T21:52:07,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:07,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:07,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741825_1001 (size=7) 2024-11-14T21:52:08,224 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583 with version=8 2024-11-14T21:52:08,226 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:56286 server-side Connection retries=45 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:08,226 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:52:08,227 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for 
buffer allocation 2024-11-14T21:52:08,227 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:56286 2024-11-14T21:52:08,228 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:56286 connecting to ZooKeeper ensemble=127.0.0.1:49891 2024-11-14T21:52:08,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:562860x0, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:08,238 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:56286-0x100ac6d169d0000 connected 2024-11-14T21:52:08,256 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:08,258 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:08,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:08,260 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583, hbase.cluster.distributed=false 2024-11-14T21:52:08,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:08,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=56286 2024-11-14T21:52:08,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=56286 2024-11-14T21:52:08,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=56286 2024-11-14T21:52:08,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=56286 2024-11-14T21:52:08,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=56286 2024-11-14T21:52:08,264 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:08,265 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:52:08,265 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:52:08,266 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35921 2024-11-14T21:52:08,267 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35921 connecting to ZooKeeper ensemble=127.0.0.1:49891 2024-11-14T21:52:08,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:08,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:08,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359210x0, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:08,273 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359210x0, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:08,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35921-0x100ac6d169d0001 connected 2024-11-14T21:52:08,274 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:52:08,275 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:52:08,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:52:08,277 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:08,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35921 2024-11-14T21:52:08,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35921 2024-11-14T21:52:08,278 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35921 2024-11-14T21:52:08,279 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35921 2024-11-14T21:52:08,279 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35921 2024-11-14T21:52:08,279 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(181): 
Starting master: ef788a4cade8,56286,1731621128225 2024-11-14T21:52:08,291 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef788a4cade8:56286 2024-11-14T21:52:08,291 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,56286,1731621128225 2024-11-14T21:52:08,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:08,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:08,296 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef788a4cade8,56286,1731621128225 2024-11-14T21:52:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:08,298 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:52:08,299 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,56286,1731621128225 from backup master directory 2024-11-14T21:52:08,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,56286,1731621128225 2024-11-14T21:52:08,301 WARN [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
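The ZooKeeper events above, together with the "Registered as active master" record just below, trace HBase's znode-based master election: the master first registers under /hbase/backup-masters, takes the /hbase/master znode, then removes its backup entry. A rough sketch of the ephemeral-znode pattern this is built on, using the plain ZooKeeper client (this is not HBase's ActiveMasterManager; paths, data, and error handling are simplified, and the connect string is hypothetical):

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ZkMasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical ensemble address and session timeout.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
        event -> System.out.println("ZK event: " + event));
    byte[] serverName = "host,port,startcode".getBytes(StandardCharsets.UTF_8);
    try {
      // Whoever creates the ephemeral master znode first becomes the active master.
      zk.create("/hbase/master", serverName,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      System.out.println("Registered as active master");
    } catch (KeeperException.NodeExistsException e) {
      // Otherwise park under backup-masters and watch the active znode go away.
      zk.create("/hbase/backup-masters/this-master", serverName,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      zk.exists("/hbase/master", true);
      System.out.println("Registered as backup master");
    }
    zk.close();
  }
}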
2024-11-14T21:52:08,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:08,301 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,56286,1731621128225 2024-11-14T21:52:08,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:08,310 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/hbase.id] with ID: 3540b608-16c4-4532-b393-2df7e878b412 2024-11-14T21:52:08,311 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.tmp/hbase.id 2024-11-14T21:52:08,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741826_1002 (size=42) 2024-11-14T21:52:08,719 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.tmp/hbase.id]:[hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/hbase.id] 2024-11-14T21:52:08,738 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:08,738 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:52:08,740 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-14T21:52:08,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:08,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:08,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741827_1003 (size=196) 2024-11-14T21:52:08,782 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:08,982 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:52:09,156 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:52:09,157 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:52:09,158 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:09,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741828_1004 (size=1189) 2024-11-14T21:52:09,282 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:09,485 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:52:09,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:52:09,579 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store 2024-11-14T21:52:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741829_1005 (size=34) 2024-11-14T21:52:09,783 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:52:09,998 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:09,998 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:52:09,998 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621129998Disabling compacts and flushes for region at 1731621129998Disabling writes for close at 1731621129998Writing region close event to WAL at 1731621129998Closed at 1731621129998 2024-11-14T21:52:10,000 WARN [master/ef788a4cade8:56286:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/.initializing 2024-11-14T21:52:10,000 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225 2024-11-14T21:52:10,001 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:52:10,004 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C56286%2C1731621128225, suffix=, logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225, archiveDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/oldWALs, maxLogs=10 2024-11-14T21:52:10,019 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004, exclude list is [], retry=0 2024-11-14T21:52:10,023 DEBUG [HRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40079,DS-1aa688d2-9371-4fad-a950-f82e44c7b32b,DISK] 2024-11-14T21:52:10,027 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:10,027 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45235:45235)] 2024-11-14T21:52:10,027 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:52:10,028 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:10,028 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,028 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,030 
INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:52:10,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:52:10,035 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:10,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:52:10,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:10,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:52:10,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:10,041 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,042 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,043 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,045 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,045 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,046 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:52:10,047 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:10,050 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:52:10,051 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67836943, jitterRate=0.010849222540855408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:10,051 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621130028Initializing all the Stores at 1731621130030 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130030Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621130030Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621130030Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621130030Cleaning up temporary data from old regions at 1731621130045 (+15 ms)Region opened successfully at 1731621130051 (+6 ms) 2024-11-14T21:52:10,052 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:52:10,056 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66334952, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:10,057 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T21:52:10,058 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:52:10,058 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:52:10,058 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:52:10,058 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T21:52:10,059 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T21:52:10,059 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T21:52:10,061 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-14T21:52:10,062 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:52:10,064 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:52:10,064 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:52:10,065 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:52:10,068 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:52:10,068 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:52:10,069 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:52:10,070 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:52:10,071 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:52:10,072 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:52:10,074 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:52:10,076 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:52:10,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:10,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:10,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,080 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,56286,1731621128225, sessionid=0x100ac6d169d0000, setting cluster-up flag (Was=false) 2024-11-14T21:52:10,082 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(746): ClusterId : 3540b608-16c4-4532-b393-2df7e878b412 2024-11-14T21:52:10,082 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:52:10,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,091 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:52:10,091 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:52:10,093 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:52:10,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,096 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,56286,1731621128225 2024-11-14T21:52:10,103 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:52:10,103 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:52:10,103 DEBUG [RS:0;ef788a4cade8:35921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36236e1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:10,104 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,56286,1731621128225 2024-11-14T21:52:10,106 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 
2024-11-14T21:52:10,114 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:10,114 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:52:10,114 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T21:52:10,115 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,56286,1731621128225 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:52:10,116 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:56286, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:10,116 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:56286, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:10,116 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:56286, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:10,116 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:56286, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:10,116 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:56286, corePoolSize=10, maxPoolSize=10 2024-11-14T21:52:10,117 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:56286, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,117 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:56286, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:10,117 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:56286, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,118 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef788a4cade8:35921 2024-11-14T21:52:10,118 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:52:10,118 INFO 
[RS:0;ef788a4cade8:35921 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T21:52:10,118 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T21:52:10,119 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,56286,1731621128225 with port=35921, startcode=1731621128264 2024-11-14T21:52:10,119 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621160119 2024-11-14T21:52:10,120 DEBUG [RS:0;ef788a4cade8:35921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:52:10,120 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:10,121 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:52:10,121 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:52:10,121 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:10,121 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:52:10,121 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T21:52:10,121 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:52:10,121 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:52:10,122 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.large.0-1731621130121,5,FailOnTimeoutGroup] 2024-11-14T21:52:10,122 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.small.0-1731621130122,5,FailOnTimeoutGroup] 2024-11-14T21:52:10,122 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,122 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:52:10,122 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,122 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:10,122 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,123 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T21:52:10,123 INFO [HMaster-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:52:10,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,126 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583 2024-11-14T21:52:10,126 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36589 2024-11-14T21:52:10,126 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T21:52:10,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:10,129 DEBUG [RS:0;ef788a4cade8:35921 {}] zookeeper.ZKUtil(111): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,130 WARN [RS:0;ef788a4cade8:35921 {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:52:10,130 INFO [RS:0;ef788a4cade8:35921 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:10,130 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741831_1007 (size=1321) 2024-11-14T21:52:10,130 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef788a4cade8,35921,1731621128264] 2024-11-14T21:52:10,132 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T21:52:10,132 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583 2024-11-14T21:52:10,134 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T21:52:10,137 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T21:52:10,137 INFO [RS:0;ef788a4cade8:35921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T21:52:10,137 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,138 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T21:52:10,139 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T21:52:10,140 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:10,140 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:10,141 DEBUG [RS:0;ef788a4cade8:35921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,142 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,35921,1731621128264-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:52:10,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741832_1008 (size=32) 2024-11-14T21:52:10,162 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T21:52:10,162 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,35921,1731621128264-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,162 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:10,162 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.Replication(171): ef788a4cade8,35921,1731621128264 started 2024-11-14T21:52:10,177 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:10,177 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1482): Serving as ef788a4cade8,35921,1731621128264, RpcServer on ef788a4cade8/172.17.0.2:35921, sessionid=0x100ac6d169d0001 2024-11-14T21:52:10,177 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T21:52:10,177 DEBUG [RS:0;ef788a4cade8:35921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,177 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,35921,1731621128264' 2024-11-14T21:52:10,177 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T21:52:10,178 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,35921,1731621128264' 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T21:52:10,179 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T21:52:10,180 DEBUG [RS:0;ef788a4cade8:35921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T21:52:10,180 INFO [RS:0;ef788a4cade8:35921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T21:52:10,180 INFO [RS:0;ef788a4cade8:35921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T21:52:10,281 INFO [RS:0;ef788a4cade8:35921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:52:10,283 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:52:10,284 INFO [RS:0;ef788a4cade8:35921 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C35921%2C1731621128264, suffix=, logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264, archiveDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/oldWALs, maxLogs=32 2024-11-14T21:52:10,299 DEBUG [RS:0;ef788a4cade8:35921 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.1731621130285, exclude list is [], retry=0 2024-11-14T21:52:10,302 DEBUG [HRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40079,DS-1aa688d2-9371-4fad-a950-f82e44c7b32b,DISK] 2024-11-14T21:52:10,305 INFO [RS:0;ef788a4cade8:35921 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.1731621130285 2024-11-14T21:52:10,305 DEBUG [RS:0;ef788a4cade8:35921 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45235:45235)] 2024-11-14T21:52:10,503 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T21:52:10,503 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T21:52:10,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:10,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:52:10,549 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:52:10,549 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:52:10,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:52:10,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:52:10,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:52:10,555 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 
1588230740 2024-11-14T21:52:10,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:52:10,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:52:10,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740 2024-11-14T21:52:10,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740 2024-11-14T21:52:10,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:52:10,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:52:10,562 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
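
The CompactionConfiguration and FlushLargeStoresPolicy records above report the effective defaults for the meta region's stores: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, a 604800000 ms major-compaction period with 0.5 jitter, and a 32 MB per-family flush lower bound derived from the 128 MB memstore flush size. As a hedged sketch of how those knobs are normally set, the snippet below writes the corresponding hbase-site.xml keys into a Configuration; the key names are standard HBase property names, but the values simply echo what this log reports and are not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: configuration keys behind the compaction/flush values logged above.
    public class CompactionTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);             // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);            // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);      // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        conf.setLong("hbase.hregion.memstore.flush.size", 134217728L); // 128 MB
        // The log notes hbase.hregion.percolumnfamilyflush.size.lower.bound is not set
        // on the hbase:meta table descriptor, so FlushLargeStoresPolicy falls back to
        // flush size / number of families, i.e. the 32 MB reported above.
        return conf;
      }
    }
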
2024-11-14T21:52:10,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:52:10,567 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:52:10,567 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66876219, jitterRate=-0.003466680645942688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:10,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731621130545Initializing all the Stores at 1731621130546 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130546Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130547 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621130547Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130547Cleaning up temporary data from old regions at 1731621130562 (+15 ms)Region opened successfully at 1731621130568 (+6 ms) 2024-11-14T21:52:10,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:52:10,568 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:52:10,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:52:10,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:52:10,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:52:10,569 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:10,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621130568Disabling compacts and flushes for region at 1731621130568Disabling writes for close at 1731621130569 (+1 
ms)Writing region close event to WAL at 1731621130569Closed at 1731621130569 2024-11-14T21:52:10,571 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:10,571 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T21:52:10,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T21:52:10,573 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:52:10,575 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T21:52:10,726 DEBUG [ef788a4cade8:56286 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T21:52:10,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,729 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,35921,1731621128264, state=OPENING 2024-11-14T21:52:10,731 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T21:52:10,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:10,738 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:52:10,738 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:10,738 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:10,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,35921,1731621128264}] 2024-11-14T21:52:10,783 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
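
The assignment records above end with hbase:meta being moved to OPENING on ef788a4cade8,35921 and the /hbase/meta-region-server znode being updated. A minimal client-side counterpart, shown below, asks an HBase connection for the current location of hbase:meta; the ZooKeeper quorum and port are the ones used by this test, and the rest is a generic sketch of the public client API rather than anything the test itself runs.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Sketch: resolve the current hbase:meta location that the master publishes
    // via /hbase/meta-region-server in the records above.
    public class MetaLocationProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // host from this log
        conf.setInt("hbase.zookeeper.property.clientPort", 49891); // port from this log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }
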
2024-11-14T21:52:10,894 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T21:52:10,897 INFO [HRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57535, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T21:52:10,902 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T21:52:10,902 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:10,903 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-14T21:52:10,906 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C35921%2C1731621128264.meta, suffix=.meta, logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264, archiveDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/oldWALs, maxLogs=32 2024-11-14T21:52:10,925 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta, exclude list is [], retry=0 2024-11-14T21:52:10,930 DEBUG [HRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40079,DS-1aa688d2-9371-4fad-a950-f82e44c7b32b,DISK] 2024-11-14T21:52:10,935 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta 2024-11-14T21:52:10,937 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45235:45235)] 2024-11-14T21:52:10,938 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:52:10,938 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T21:52:10,938 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T21:52:10,938 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T21:52:10,939 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T21:52:10,939 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:10,939 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T21:52:10,939 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T21:52:10,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:52:10,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:52:10,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:52:10,946 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-11-14T21:52:10,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:52:10,949 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:52:10,949 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:52:10,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:52:10,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:10,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:10,953 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:52:10,954 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740 2024-11-14T21:52:10,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740 2024-11-14T21:52:10,958 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:52:10,959 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:52:10,959 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:52:10,962 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:52:10,964 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68554764, jitterRate=0.021545588970184326}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:10,964 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T21:52:10,965 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731621130939Writing region info on filesystem at 1731621130939Initializing all the Stores at 1731621130941 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130941Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130942 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621130942Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621130942Cleaning up temporary data from old regions at 1731621130959 (+17 ms)Running coprocessor post-open hooks at 1731621130964 (+5 ms)Region opened successfully at 1731621130965 (+1 ms) 2024-11-14T21:52:10,967 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731621130893 2024-11-14T21:52:10,972 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T21:52:10,972 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T21:52:10,973 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,974 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,35921,1731621128264, state=OPEN 2024-11-14T21:52:10,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:10,976 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef788a4cade8,35921,1731621128264 2024-11-14T21:52:10,976 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:10,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:10,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T21:52:10,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,35921,1731621128264 in 238 msec 2024-11-14T21:52:10,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T21:52:10,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN in 410 msec 2024-11-14T21:52:10,988 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:10,988 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T21:52:10,990 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T21:52:10,991 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef788a4cade8,35921,1731621128264, seqNum=-1] 2024-11-14T21:52:10,991 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T21:52:10,993 INFO [HRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47051, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T21:52:11,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 889 msec 2024-11-14T21:52:11,007 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731621131007, completionTime=-1 2024-11-14T21:52:11,007 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T21:52:11,008 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T21:52:11,011 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T21:52:11,011 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731621191011 2024-11-14T21:52:11,011 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731621251011 2024-11-14T21:52:11,011 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-14T21:52:11,012 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,012 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,012 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:11,012 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef788a4cade8:56286, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,012 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,014 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,015 DEBUG [master/ef788a4cade8:56286.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.719sec 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:52:11,021 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T21:52:11,025 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T21:52:11,025 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T21:52:11,025 INFO [master/ef788a4cade8:56286:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,56286,1731621128225-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:11,283 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
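
The block of "Chore ScheduledChore name=..., period=..., unit=... is enabled" records above comes from the master scheduling its background chores (balancer, catalog janitor, normalizer, MOB cleaners, and so on) on its ChoreService. The sketch below shows the general shape of a custom chore scheduled the same way; the constructor signatures are quoted from memory of the hbase-common API and should be treated as an assumption, and the chore itself is hypothetical.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Sketch (assumed signatures): a custom background chore on a ChoreService,
    // mirroring the "Chore ScheduledChore name=... is enabled." records above.
    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore demoChore = new ScheduledChore("DemoChore", stopper, 60000) {
          @Override protected void chore() {
            System.out.println("periodic work, like ClusterStatusChore or BalancerChore");
          }
        };
        ChoreService choreService = new ChoreService("demo");
        // Scheduling is what produces the "is enabled." records above.
        choreService.scheduleChore(demoChore);
        // ... later: stopper.stop("done"); choreService.shutdown();
      }
    }
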
2024-11-14T21:52:11,286 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:52:11,287 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:52:11,288 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35045 2024-11-14T21:52:11,289 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35045 connecting to ZooKeeper ensemble=127.0.0.1:49891 2024-11-14T21:52:11,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:11,291 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:11,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350450x0, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:11,296 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:350450x0, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T21:52:11,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35045-0x100ac6d169d0002 connected 2024-11-14T21:52:11,296 DEBUG [pool-232-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T21:52:11,297 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:52:11,297 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:52:11,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
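
The second region server's RPC setup above (default, priority, replication, and metaPriority executors with handlerCount=3, maxQueueLength=30, and a read/write split of the priority queues) is driven by a handful of hbase-site.xml keys. The snippet below sets the most relevant ones; the key names are standard, the values simply echo what this test logged, and the tiny handler count is only sensible for a unit test. Which of the call-queue ratio keys applies to the priority executor specifically is an assumption here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: knobs behind the RpcExecutor records above (counts as logged by the test).
    public class RpcExecutorTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);       // handlerCount=3
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30); // maxQueueLength=30
        // General read/write call-queue split, as in
        // "priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2".
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        return conf;
      }
    }
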
2024-11-14T21:52:11,301 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:11,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-14T21:52:11,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35045 2024-11-14T21:52:11,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35045 2024-11-14T21:52:11,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-14T21:52:11,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-14T21:52:11,304 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(193): Starting 2nd region server: ef788a4cade8,35045,1731621131286 2024-11-14T21:52:11,305 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(746): ClusterId : 3540b608-16c4-4532-b393-2df7e878b412 2024-11-14T21:52:11,305 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:52:11,308 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:52:11,308 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:52:11,311 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:52:11,311 DEBUG [RS:1;ef788a4cade8:35045 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c8b252a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:11,331 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef788a4cade8:35045 2024-11-14T21:52:11,332 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:52:11,332 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T21:52:11,332 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T21:52:11,804 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(360): Waiting 2nd RS to be started ... 2024-11-14T21:52:11,804 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,56286,1731621128225' ***** 2024-11-14T21:52:11,804 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: Stopping master 2024-11-14T21:52:11,804 INFO [M:0;ef788a4cade8:56286 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:52:11,804 INFO [M:0;ef788a4cade8:56286 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:0;ef788a4cade8:56286. 
2024-11-14T21:52:11,804 DEBUG [M:0;ef788a4cade8:56286 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
  at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630)
  at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T21:52:11,804 DEBUG [M:0;ef788a4cade8:56286 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T21:52:11,805 INFO [M:0;ef788a4cade8:56286 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-14T21:52:11,805 DEBUG [M:0;ef788a4cade8:56286 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-14T21:52:11,805 DEBUG [M:0;ef788a4cade8:56286 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-14T21:52:11,805 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.large.0-1731621130121 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.large.0-1731621130121,5,FailOnTimeoutGroup]
2024-11-14T21:52:11,805 INFO [M:0;ef788a4cade8:56286 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:56286 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-14T21:52:11,805 DEBUG [master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.small.0-1731621130122 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:56286:becomeActiveMaster-HFileCleaner.small.0-1731621130122,5,FailOnTimeoutGroup]
2024-11-14T21:52:11,805 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-14T21:52:11,805 INFO [M:0;ef788a4cade8:56286 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-14T21:52:11,805 DEBUG [M:0;ef788a4cade8:56286 {}] master.HMaster(1795): Stopping service threads
2024-11-14T21:52:11,805 INFO [M:0;ef788a4cade8:56286 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-14T21:52:11,806 INFO [M:0;ef788a4cade8:56286 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-14T21:52:11,806 INFO [M:0;ef788a4cade8:56286 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-14T21:52:11,807 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,810 DEBUG [M:0;ef788a4cade8:56286 {}] zookeeper.ZKUtil(347): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:11,810 WARN [M:0;ef788a4cade8:56286 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T21:52:11,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:52:11,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:52:11,811 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:52351 server-side Connection retries=45 2024-11-14T21:52:11,811 INFO [M:0;ef788a4cade8:56286 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.lastflushedseqids 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:52:11,812 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:52:11,813 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:52351 2024-11-14T21:52:11,814 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:52351 connecting to ZooKeeper ensemble=127.0.0.1:49891 2024-11-14T21:52:11,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:523510x0, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:52:11,817 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:52351-0x100ac6d169d0003 connected 2024-11-14T21:52:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741835_1011 (size=99) 2024-11-14T21:52:11,821 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:11,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:11,824 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T21:52:11,824 DEBUG [pool-242-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T21:52:11,824 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583, hbase.cluster.distributed=false 2024-11-14T21:52:11,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:52:11,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=52351 2024-11-14T21:52:11,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=52351 2024-11-14T21:52:11,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=52351 2024-11-14T21:52:11,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=52351 2024-11-14T21:52:11,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=52351 2024-11-14T21:52:11,831 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(208): Starting new master: ef788a4cade8,52351,1731621131811 2024-11-14T21:52:11,834 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.TestRegionServerReportForDuty$MyRegionServer(399): Waiting for master switch over ... 2024-11-14T21:52:11,843 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:1;ef788a4cade8:52351 2024-11-14T21:52:11,843 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,52351,1731621131811 2024-11-14T21:52:11,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,846 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef788a4cade8,52351,1731621131811 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:11,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-14T21:52:11,852 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:52:11,853 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,52351,1731621131811 from backup master directory 2024-11-14T21:52:11,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,52351,1731621131811 2024-11-14T21:52:11,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:52:11,855 WARN [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:52:11,855 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,52351,1731621131811 2024-11-14T21:52:11,875 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:52:11,875 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:52:11,886 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=1) cost 11ms. 
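[Editor's note] The ZKWatcher lines above show the new master claiming /hbase/master while the other processes receive NodeCreated / NodeChildrenChanged events on their watches. For orientation only, here is a minimal sketch of that raw ZooKeeper watch pattern using the plain ZooKeeper client API, not HBase's ZKWatcher or ActiveMasterManager; the class name and session timeout are invented, while the quorum address and znode path are copied from the log.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Same quorum as in the log; the watcher receives both session-state and znode events.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49891", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // NodeCreated / NodeDeleted / NodeChildrenChanged arrive here, mirroring the
          // "Received ZooKeeper Event" lines above.
          System.out.println("event type=" + e.getType() + " path=" + e.getPath());
        });
        connected.await();
        // exists() with watch=true sets a one-shot watch on /hbase/master whether or not
        // the znode exists yet ("Set watcher on znode that does not yet exist").
        zk.exists("/hbase/master", true);
      }
    }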
2024-11-14T21:52:11,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741836_1012 (size=196) 2024-11-14T21:52:12,219 INFO [M:0;ef788a4cade8:56286 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:52:12,219 INFO [M:0;ef788a4cade8:56286 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:52:12,219 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:52:12,219 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:12,219 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:12,219 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:52:12,220 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:12,220 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.53 KB heapSize=11.13 KB 2024-11-14T21:52:12,239 DEBUG [M:0;ef788a4cade8:56286 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f7314376f5a42a0ad973104c991bfa7 is 82, key is hbase:meta,,1/info:regioninfo/1731621130972/Put/seqid=0 2024-11-14T21:52:12,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741837_1013 (size=5672) 2024-11-14T21:52:12,297 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:52:12,297 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:52:12,298 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:12,308 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-11-14T21:52:12,311 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225 to hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead as it is dead 2024-11-14T21:52:12,313 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T21:52:12,313 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T21:52:12,313 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:12,316 WARN [IPC Server handler 3 on default port 36589 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead/ef788a4cade8%2C56286%2C1731621128225.1731621130004 has not been closed. Lease recovery is in progress. RecoveryId = 1014 for block blk_1073741830_1006 2024-11-14T21:52:12,320 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead/ef788a4cade8%2C56286%2C1731621128225.1731621130004 after 5ms 2024-11-14T21:52:12,331 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:12,334 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.TestRegionServerReportForDuty$MyRegionServer(399): Waiting for master switch over ... 
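[Editor's note] The RecoverLeaseFSUtils lines above describe lease recovery on the dead master's WAL: the first attempt fails while the NameNode reports "Lease recovery is in progress", and the call is retried later. As a point of reference, a minimal sketch of that loop using the plain HDFS client API follows; it is not HBase's RecoverLeaseFSUtils, the retry cadence is invented, the filesystem URI is the one from the log, and the WAL path is passed in as an argument.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverWalLease {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(URI.create("hdfs://localhost:36589"), conf);
        Path wal = new Path(args[0]); // path of the dead master's WAL file
        // recoverLease() returns true once the NameNode has closed the file; until then
        // block recovery is still in progress, so sleep and retry.
        for (int attempt = 0; !dfs.recoverLease(wal); attempt++) {
          System.out.println("lease not yet recovered, attempt=" + attempt);
          Thread.sleep(1000L);
        }
        System.out.println("lease recovered for " + wal);
      }
    }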
2024-11-14T21:52:12,335 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,56286,1731621128225 with port=35045, startcode=1731621131286 2024-11-14T21:52:12,336 DEBUG [RS:1;ef788a4cade8:35045 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:52:12,338 INFO [HMaster-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:52:12,339 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,35045,1731621131286 2024-11-14T21:52:12,339 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,35045,1731621131286 2024-11-14T21:52:12,339 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] ipc.MetricsHBaseServer(152): Unknown exception type java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager$1.serverAdded(SyncReplicationReplayWALManager.java:152) ~[classes/:?] at org.apache.hadoop.hbase.master.ServerManager.checkAndRecordNewServer(ServerManager.java:408) ~[classes/:?] at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:271) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:12,339 ERROR [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] ipc.RpcServer(511): Unexpected throwable object java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager$1.serverAdded(SyncReplicationReplayWALManager.java:152) ~[classes/:?] at org.apache.hadoop.hbase.master.ServerManager.checkAndRecordNewServer(ServerManager.java:408) ~[classes/:?] at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:271) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:12,342 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=56286 {}] ipc.CallRunner(138): callId: 0 service: RegionServerStatusService methodName: RegionServerStartup size: 52 connection: 172.17.0.2:56219 deadline: 1731621142338, exception=java.io.IOException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null 2024-11-14T21:52:12,343 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2685): error telling master we are up org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(java.io.IOException): java.io.IOException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:512) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Caused by: java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager$1.serverAdded(SyncReplicationReplayWALManager.java:152) at org.apache.hadoop.hbase.master.ServerManager.checkAndRecordNewServer(ServerManager.java:408) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:271) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ... 3 more at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.regionServerStartup(RegionServerStatusProtos.java:17266) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportForDuty(HRegionServer.java:2670) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegionServer.lambda$run$1(HRegionServer.java:838) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:833) ~[classes/:?] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: java.io.IOException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:512) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Caused by: java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hbase.procedure2.ProcedureExecutor.getEnvironment()" because the return value of "org.apache.hadoop.hbase.master.MasterServices.getMasterProcedureExecutor()" is null at org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager$1.serverAdded(SyncReplicationReplayWALManager.java:152) at org.apache.hadoop.hbase.master.ServerManager.checkAndRecordNewServer(ServerManager.java:408) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:271) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ... 
3 more at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] ... 1 more 2024-11-14T21:52:12,344 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-14T21:52:12,445 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:12,445 DEBUG [RS:1;ef788a4cade8:35045 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:52:12,447 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42093, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:52:12,448 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:12,449 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:12,449 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 
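[Editor's note] The reportForDuty retries above follow a simple doubling backoff: 100 ms, 200 ms, 400 ms and so on while the new master is not yet serving requests. Below is a minimal generic sketch of that behaviour; it is an illustrative helper, not the HRegionServer implementation, and the class name, method name, and cap are invented.

    import java.util.concurrent.Callable;

    public final class DoublingBackoff {
      /** Retries the call, doubling the sleep after each failure, capped at maxSleepMs. */
      public static <T> T retry(Callable<T> call, long initialSleepMs, long maxSleepMs)
          throws InterruptedException {
        long sleepMs = initialSleepMs;
        while (true) {
          try {
            return call.call();
          } catch (Exception e) {
            System.out.println("call failed; sleeping " + sleepMs + " ms and then retrying.");
            Thread.sleep(sleepMs);
            sleepMs = Math.min(sleepMs * 2, maxSleepMs);
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        // Fails the first three times, then succeeds; sleeps 100, 200, 400 ms in between.
        int[] attempts = {0};
        String result = retry(() -> {
          if (attempts[0]++ < 3) throw new IllegalStateException("master not running yet");
          return "registered";
        }, 100, 60_000);
        System.out.println(result);
      }
    }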
2024-11-14T21:52:12,646 INFO [M:0;ef788a4cade8:56286 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f7314376f5a42a0ad973104c991bfa7 2024-11-14T21:52:12,650 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:12,652 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:12,654 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:12,654 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 400 ms and then retrying. 2024-11-14T21:52:12,679 DEBUG [M:0;ef788a4cade8:56286 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3cac0704ad0423ea5348a34f18cd426 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731621131003/Put/seqid=0 2024-11-14T21:52:12,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741838_1015 (size=5275) 2024-11-14T21:52:12,831 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:13,055 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:13,057 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,058 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:13,058 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 800 ms and then retrying. 2024-11-14T21:52:13,087 INFO [M:0;ef788a4cade8:56286 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.98 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3cac0704ad0423ea5348a34f18cd426 2024-11-14T21:52:13,119 DEBUG [M:0;ef788a4cade8:56286 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85e70937cb494feebadffe37654e300c is 69, key is ef788a4cade8,35921,1731621128264/rs:state/1731621130124/Put/seqid=0 2024-11-14T21:52:13,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741839_1016 (size=5156) 2024-11-14T21:52:13,141 INFO [M:0;ef788a4cade8:56286 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85e70937cb494feebadffe37654e300c 2024-11-14T21:52:13,156 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f7314376f5a42a0ad973104c991bfa7 as hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2f7314376f5a42a0ad973104c991bfa7 2024-11-14T21:52:13,167 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2f7314376f5a42a0ad973104c991bfa7, entries=8, sequenceid=28, filesize=5.5 K 2024-11-14T21:52:13,169 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a3cac0704ad0423ea5348a34f18cd426 as hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a3cac0704ad0423ea5348a34f18cd426 2024-11-14T21:52:13,178 INFO [M:0;ef788a4cade8:56286 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a3cac0704ad0423ea5348a34f18cd426, entries=3, sequenceid=28, filesize=5.2 K 2024-11-14T21:52:13,180 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/85e70937cb494feebadffe37654e300c as hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/85e70937cb494feebadffe37654e300c 2024-11-14T21:52:13,190 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/85e70937cb494feebadffe37654e300c, entries=1, sequenceid=28, filesize=5.0 K 2024-11-14T21:52:13,193 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 972ms, sequenceid=28, compaction requested=false 2024-11-14T21:52:13,195 INFO [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:13,196 DEBUG [M:0;ef788a4cade8:56286 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621132219Disabling compacts and flushes for region at 1731621132219Disabling writes for close at 1731621132219Obtaining lock to block concurrent updates at 1731621132220 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621132220Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7712, getHeapSize=11328, getOffHeapSize=0, getCellsCount=35 at 1731621132220Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731621132221 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731621132221Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731621132238 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731621132238Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731621132657 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731621132679 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731621132679Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731621133097 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731621133118 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731621133118Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@169cf139: reopening flushed file at 1731621133154 (+36 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63f092c0: reopening flushed file at 1731621133167 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64f0f814: reopening flushed file at 1731621133179 (+12 ms)Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 972ms, sequenceid=28, compaction requested=false at 1731621133193 (+14 ms)Writing region close event to WAL at 1731621133195 (+2 ms)Closed at 1731621133195 2024-11-14T21:52:13,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741830_1006 (size=10075) 2024-11-14T21:52:13,203 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.io.IOException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:163) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy40.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy41.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor5.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 6 more 2024-11-14T21:52:13,204 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:13,206 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy40.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy41.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 17 more 2024-11-14T21:52:13,207 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:13,207 ERROR [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2033): Unable to recover lease after several attempts. Give up. 
java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225/ef788a4cade8%2C56286%2C1731621128225.1731621130004 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy40.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy41.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor30.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy44.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 16 more 2024-11-14T21:52:13,209 INFO [M:0;ef788a4cade8:56286 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T21:52:13,209 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:52:13,209 INFO [M:0;ef788a4cade8:56286 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:56286 2024-11-14T21:52:13,212 INFO [M:0;ef788a4cade8:56286 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:52:13,295 WARN [HRegionServer-EventLoopGroup-5-1 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server ef788a4cade8:56286 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: ef788a4cade8/172.17.0.2:56286 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:13,296 DEBUG [HRegionServer-EventLoopGroup-5-1 {}] ipc.FailedServers(52): Added failed server with address ef788a4cade8:56286 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: ef788a4cade8/172.17.0.2:56286 2024-11-14T21:52:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:13,313 INFO [M:0;ef788a4cade8:56286 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:52:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:56286-0x100ac6d169d0000, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:13,332 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:13,397 DEBUG [RS:0;ef788a4cade8:35921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:52:13,400 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46001, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:52:13,401 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,504 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,608 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,711 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,815 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,832 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:13,859 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:13,861 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:13,862 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:13,862 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 1600 ms and then retrying. 
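[editor's note] The "reportForDuty failed; sleeping 1600 ms and then retrying." entry above, together with the later 3200 ms retry, shows the region server backing off with a doubling pause while the master still answers with ServerNotRunningYetException. Below is only a minimal illustrative sketch of that retry pattern, not the actual HRegionServer code; tryReportForDuty(), the attempt limit, and the 60 s cap are assumptions made up for the example.

// Illustrative sketch only: a doubling back-off retry loop mirroring the
// 1600 ms -> 3200 ms sleeps seen in the log. tryReportForDuty() is a
// hypothetical stand-in for the regionServerStartup RPC that keeps failing
// with ServerNotRunningYetException until the master is online.
public final class ReportForDutyRetrySketch {

  private static boolean tryReportForDuty() {
    return false; // placeholder: pretend the master is not running yet
  }

  public static void main(String[] args) throws InterruptedException {
    long sleepMs = 1600L;            // initial pause observed in the log
    final long maxSleepMs = 60_000L; // assumed cap; the real cap is configurable
    for (int attempt = 1; attempt <= 5; attempt++) {
      if (tryReportForDuty()) {
        System.out.println("reportForDuty succeeded on attempt " + attempt);
        return;
      }
      System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
      Thread.sleep(sleepMs);
      sleepMs = Math.min(sleepMs * 2, maxSleepMs); // double the pause each round
    }
    System.out.println("giving up after repeated failures");
  }
}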
2024-11-14T21:52:13,919 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,023 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,127 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,230 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,332 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:14,333 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,437 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,541 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,645 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,748 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,832 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:14,852 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:14,956 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,060 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,163 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,266 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,333 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:15,370 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,463 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:15,464 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,465 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:52:15,465 WARN [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 3200 ms and then retrying. 2024-11-14T21:52:15,473 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,577 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,680 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,784 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,833 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:15,887 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:15,990 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:16,094 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:16,198 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:16,301 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:16,321 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead/ef788a4cade8%2C56286%2C1731621128225.1731621130004 after 4008ms 2024-11-14T21:52:16,322 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead/ef788a4cade8%2C56286%2C1731621128225.1731621130004 to hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:16,322 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,56286,1731621128225-dead 2024-11-14T21:52:16,323 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811 2024-11-14T21:52:16,324 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:52:16,326 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C52351%2C1731621131811, suffix=, logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811, archiveDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/oldWALs, maxLogs=10 2024-11-14T21:52:16,333 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
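[editor's note] The "Recovered lease, attempt=1 ... after 4008ms" entry above (and the earlier FileNotFoundException traces, where recovery failed because the previous master's WAL file was already gone) comes from polling the HDFS client's recoverLease call. The following is a minimal sketch of that polling, assuming fs.defaultFS points at an HDFS NameNode and taking the WAL path as a command-line argument; it is not the actual RecoverLeaseFSUtils implementation, and the 4 s pause and 10-attempt limit are assumptions for illustration.

// Minimal sketch of client-side HDFS lease recovery. DistributedFileSystem.recoverLease()
// returns true once the NameNode has closed the file, so the caller polls until it does
// (or gives up), which is the pattern behind the log entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class RecoverLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.err.println("Not running against HDFS; nothing to recover.");
      return;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path wal = new Path(args[0]); // path of the WAL whose lease should be recovered
    long start = System.currentTimeMillis();
    for (int attempt = 1; attempt <= 10; attempt++) {
      if (dfs.recoverLease(wal)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal
            + " after " + (System.currentTimeMillis() - start) + "ms");
        return;
      }
      Thread.sleep(4000L); // pause between attempts; the real interval is configurable
    }
    System.err.println("Unable to recover lease after several attempts. Give up.");
  }
}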
2024-11-14T21:52:16,340 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326, exclude list is [], retry=0 2024-11-14T21:52:16,343 DEBUG [HMaster-EventLoopGroup-7-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40079,DS-1aa688d2-9371-4fad-a950-f82e44c7b32b,DISK] 2024-11-14T21:52:16,345 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 2024-11-14T21:52:16,346 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45235:45235)] 2024-11-14T21:52:16,346 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:52:16,346 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:52:16,346 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,346 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:52:16,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:16,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2f7314376f5a42a0ad973104c991bfa7 2024-11-14T21:52:16,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:52:16,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:52:16,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:16,370 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a3cac0704ad0423ea5348a34f18cd426 2024-11-14T21:52:16,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:16,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,371 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:52:16,371 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:16,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/85e70937cb494feebadffe37654e300c 2024-11-14T21:52:16,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:16,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:52:16,382 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:52:16,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:52:16,382 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,383 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-11-14T21:52:16,384 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:16,397 DEBUG 
[master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 37, firstSequenceIdInLog=3, maxSequenceIdInLog=30, path=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:16,398 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/ef788a4cade8%2C56286%2C1731621128225.1731621130004 2024-11-14T21:52:16,401 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,401 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,402 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:52:16,404 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:52:16,405 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:16,409 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-14T21:52:16,410 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=31; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65101185, jitterRate=-0.02991674840450287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:52:16,412 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621136347Initializing all the Stores at 1731621136348 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621136348Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621136348Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621136348Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621136348Cleaning up temporary data from old regions at 1731621136401 (+53 ms)Region opened successfully at 1731621136412 (+11 ms) 2024-11-14T21:52:16,412 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:52:16,417 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3558b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:52:16,432 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 2024-11-14T21:52:16,432 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:52:16,433 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:52:16,433 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:52:16,434 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T21:52:16,440 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:52:16,441 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:52:16,442 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-14T21:52:16,442 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T21:52:16,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:52:16,452 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=ef788a4cade8,35921,1731621128264, regionLocation=ef788a4cade8,35921,1731621128264, openSeqNum=2 2024-11-14T21:52:16,452 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=ef788a4cade8,35921,1731621128264, table=hbase:meta, region=1588230740 2024-11-14T21:52:16,453 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,35921,1731621128264, state=OPEN 2024-11-14T21:52:16,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:16,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:16,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:52:16,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:16,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:16,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:52:16,460 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 
2024-11-14T21:52:16,461 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,35921,1731621128264 2024-11-14T21:52:16,462 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:52:16,463 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:52:16,464 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:52:16,465 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:52:16,466 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:52:16,466 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:52:16,467 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:52:16,469 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:52:16,470 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:52:16,471 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:52:16,474 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:52:16,475 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:52:16,475 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,52351,1731621131811, sessionid=0x100ac6d169d0003, setting cluster-up flag (Was=true) 2024-11-14T21:52:16,478 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:52:16,479 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,52351,1731621131811 2024-11-14T21:52:16,484 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes 
/hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:52:16,485 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,52351,1731621131811 2024-11-14T21:52:16,487 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.lastflushedseqids 2024-11-14T21:52:16,491 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:52:16,491 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T21:52:16,492 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,52351,1731621131811 Number of backup masters: 0 Number of live region servers: 1 ef788a4cade8,35921,1731621128264 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:52:16,493 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:52351, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:16,493 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:52351, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:16,493 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:52351, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:16,494 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:52351, corePoolSize=5, maxPoolSize=5 2024-11-14T21:52:16,494 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:52351, corePoolSize=10, maxPoolSize=10 2024-11-14T21:52:16,494 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:52351, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:16,494 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:52351, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:16,494 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:52351, corePoolSize=1, maxPoolSize=1 
2024-11-14T21:52:16,494 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621166494 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:52:16,495 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:52:16,496 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.large.0-1731621136496,5,FailOnTimeoutGroup] 2024-11-14T21:52:16,496 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.small.0-1731621136496,5,FailOnTimeoutGroup] 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:16,496 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731621136496, completionTime=-1 2024-11-14T21:52:16,499 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=0ms, expecting min=2 server(s), max=2 server(s), timeout=4500ms, lastChange=0ms 2024-11-14T21:52:16,833 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:17,334 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:17,834 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:18,003 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=1507ms, expecting min=2 server(s), max=2 server(s), timeout=4500ms, lastChange=1507ms 2024-11-14T21:52:18,334 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:52:18,666 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,52351,1731621131811 with port=35045, startcode=1731621131286 2024-11-14T21:52:18,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,668 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,670 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583 2024-11-14T21:52:18,671 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36589 2024-11-14T21:52:18,671 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T21:52:18,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:18,674 DEBUG [RS:1;ef788a4cade8:35045 {}] zookeeper.ZKUtil(111): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,674 WARN [RS:1;ef788a4cade8:35045 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T21:52:18,674 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef788a4cade8,35045,1731621131286] 2024-11-14T21:52:18,674 INFO [RS:1;ef788a4cade8:35045 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:52:18,674 DEBUG [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,678 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T21:52:18,681 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T21:52:18,696 INFO [RS:1;ef788a4cade8:35045 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T21:52:18,696 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,698 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T21:52:18,700 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T21:52:18,700 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,701 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,702 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,702 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,702 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:52:18,702 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:18,702 DEBUG [RS:1;ef788a4cade8:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:52:18,705 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=2209ms, expected min=2 server(s), max=2 server(s), master is running 2024-11-14T21:52:18,705 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-14T21:52:18,705 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T21:52:18,705 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef788a4cade8,35921,1731621128264, seqNum=-1] 2024-11-14T21:52:18,706 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,710 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,35045,1731621131286-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:52:18,715 INFO [HRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54211, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T21:52:18,727 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=2 2024-11-14T21:52:18,727 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731621198727 2024-11-14T21:52:18,728 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731621258728 2024-11-14T21:52:18,728 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 22 msec 2024-11-14T21:52:18,728 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,728 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,728 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:18,729 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef788a4cade8:52351, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,729 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,731 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,732 DEBUG [master/ef788a4cade8:52351.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T21:52:18,738 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 6.883sec 2024-11-14T21:52:18,742 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T21:52:18,742 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T21:52:18,742 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T21:52:18,742 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T21:52:18,742 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T21:52:18,743 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:52:18,743 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T21:52:18,748 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T21:52:18,749 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,35045,1731621131286-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,749 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,749 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.Replication(171): ef788a4cade8,35045,1731621131286 started 2024-11-14T21:52:18,753 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T21:52:18,753 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T21:52:18,753 INFO [master/ef788a4cade8:52351:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,52351,1731621131811-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T21:52:18,766 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:52:18,766 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1482): Serving as ef788a4cade8,35045,1731621131286, RpcServer on ef788a4cade8/172.17.0.2:35045, sessionid=0x100ac6d169d0002 2024-11-14T21:52:18,767 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T21:52:18,767 DEBUG [RS:1;ef788a4cade8:35045 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,767 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,35045,1731621131286' 2024-11-14T21:52:18,767 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,35045,1731621131286' 2024-11-14T21:52:18,768 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T21:52:18,769 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T21:52:18,770 DEBUG [RS:1;ef788a4cade8:35045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T21:52:18,770 INFO [RS:1;ef788a4cade8:35045 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T21:52:18,770 INFO [RS:1;ef788a4cade8:35045 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T21:52:18,834 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:52:18,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T21:52:18,835 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=110999576, stopped=false 2024-11-14T21:52:18,835 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef788a4cade8,52351,1731621131811 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:18,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:18,837 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:52:18,838 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T21:52:18,838 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:91) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:52:18,838 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:18,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:18,838 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:18,838 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'ef788a4cade8,35921,1731621128264' ***** 2024-11-14T21:52:18,838 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T21:52:18,838 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,35045,1731621131286' ***** 2024-11-14T21:52:18,838 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,35921,1731621128264 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:52:18,839 INFO [RS:0;ef788a4cade8:35921 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef788a4cade8:35921. 2024-11-14T21:52:18,839 DEBUG [RS:0;ef788a4cade8:35921 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:52:18,839 DEBUG [RS:0;ef788a4cade8:35921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:18,840 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T21:52:18,840 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T21:52:18,840 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T21:52:18,840 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T21:52:18,840 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T21:52:18,840 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T21:52:18,840 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T21:52:18,840 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-14T21:52:18,841 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef788a4cade8:35045. 2024-11-14T21:52:18,841 DEBUG [RS:1;ef788a4cade8:35045 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:52:18,841 DEBUG [RS:1;ef788a4cade8:35045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,35045,1731621131286; all regions closed. 
2024-11-14T21:52:18,841 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:52:18,841 DEBUG [RS:1;ef788a4cade8:35045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:18,841 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:18,841 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:52:18,842 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:52:18,842 INFO [RS:1;ef788a4cade8:35045 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:52:18,842 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:52:18,842 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:52:18,842 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T21:52:18,842 INFO [RS:1;ef788a4cade8:35045 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T21:52:18,842 INFO [regionserver/ef788a4cade8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:52:18,843 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:52:18,843 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T21:52:18,844 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T21:52:18,844 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T21:52:18,844 INFO [RS:1;ef788a4cade8:35045 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:52:18,845 INFO [RS:1;ef788a4cade8:35045 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35045 2024-11-14T21:52:18,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:18,848 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef788a4cade8,35045,1731621131286] 2024-11-14T21:52:18,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef788a4cade8,35045,1731621131286 2024-11-14T21:52:18,849 INFO [RS:1;ef788a4cade8:35045 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:52:18,850 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef788a4cade8,35045,1731621131286 already deleted, retry=false 2024-11-14T21:52:18,850 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef788a4cade8,35045,1731621131286 expired; onlineServers=1 2024-11-14T21:52:18,853 INFO [regionserver/ef788a4cade8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:18,869 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/.tmp/ns/7e13700d2ec34c2c8f46c9cca5b7123a is 43, key is default/ns:d/1731621130994/Put/seqid=0 2024-11-14T21:52:18,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741841_1018 (size=5153) 2024-11-14T21:52:18,878 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/.tmp/ns/7e13700d2ec34c2c8f46c9cca5b7123a 2024-11-14T21:52:18,893 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/.tmp/ns/7e13700d2ec34c2c8f46c9cca5b7123a as hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/ns/7e13700d2ec34c2c8f46c9cca5b7123a 2024-11-14T21:52:18,919 INFO [regionserver/ef788a4cade8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:18,919 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/ns/7e13700d2ec34c2c8f46c9cca5b7123a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T21:52:18,938 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 
in 96ms, sequenceid=6, compaction requested=false 2024-11-14T21:52:18,947 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T21:52:18,949 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T21:52:18,949 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:18,949 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621138841Running coprocessor pre-close hooks at 1731621138841Disabling compacts and flushes for region at 1731621138841Disabling writes for close at 1731621138842 (+1 ms)Obtaining lock to block concurrent updates at 1731621138842Preparing flush snapshotting stores in 1588230740 at 1731621138842Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731621138843 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731621138845 (+2 ms)Flushing 1588230740/ns: creating writer at 1731621138846 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731621138868 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731621138868Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74ece062: reopening flushed file at 1731621138891 (+23 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=6, compaction requested=false at 1731621138939 (+48 ms)Writing region close event to WAL at 1731621138940 (+1 ms)Running coprocessor post-close hooks at 1731621138949 (+9 ms)Closed at 1731621138949 2024-11-14T21:52:18,950 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T21:52:18,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:18,954 INFO [RS:1;ef788a4cade8:35045 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:52:18,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x100ac6d169d0002, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:18,955 INFO [RS:1;ef788a4cade8:35045 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,35045,1731621131286; zookeeper connection closed. 
2024-11-14T21:52:18,963 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60e09ea8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@60e09ea8 2024-11-14T21:52:18,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741834_1010 (size=1144) 2024-11-14T21:52:18,968 WARN [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60e09ea8 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta not finished, retry = 0 2024-11-14T21:52:19,041 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,35921,1731621128264; all regions closed. 2024-11-14T21:52:19,046 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:19,048 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta 2024-11-14T21:52:19,048 WARN [IPC Server handler 0 on default port 36589 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta closed. 
2024-11-14T21:52:19,049 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.meta.1731621130907.meta after 1ms 2024-11-14T21:52:19,052 DEBUG [RS:0;ef788a4cade8:35921 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/oldWALs 2024-11-14T21:52:19,052 INFO [RS:0;ef788a4cade8:35921 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C35921%2C1731621128264.meta:.meta(num 1731621130907) 2024-11-14T21:52:19,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741833_1009 (size=93) 2024-11-14T21:52:19,057 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.1731621130285 not finished, retry = 0 2024-11-14T21:52:19,069 ERROR [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60e09ea8 {}] hdfs.DFSClient(665): Failed to close file: /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/WALs/ef788a4cade8,35921,1731621128264/ef788a4cade8%2C35921%2C1731621128264.1731621130285 with renewLeaseKey: DEFAULT_16431 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FilterFileSystem.close(FilterFileSystem.java:530) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.fs.HFileSystem.close(HFileSystem.java:228) ~[classes/:?] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread.run(SingleProcessHBaseCluster.java:212) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:52:19,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741840_1017 (size=214) 2024-11-14T21:52:19,142 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T21:52:19,142 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T21:52:19,161 ERROR [RS:0;ef788a4cade8:35921 {}] wal.AbstractWALProvider(289): cleanup WAL failed java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1731) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1717) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.listStatusInternal(DistributedFileSystem.java:1112) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.access$600(DistributedFileSystem.java:148) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1187) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1184) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.listStatus(DistributedFileSystem.java:1194) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystem.listStatus(FileSystem.java:2078) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystem.listStatus(FileSystem.java:2122) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.util.CommonFSUtils.listStatus(CommonFSUtils.java:602) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.getFiles(AbstractFSWAL.java:1155) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.close(AbstractFSWAL.java:1235) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractFSWALProvider.close0(AbstractFSWALProvider.java:170) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.close(AbstractWALProvider.java:304) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.close(WALFactory.java:302) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.shutdownWAL(HRegionServer.java:1380) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:990) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:19,161 ERROR [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1386): Shutdown / close of WAL failed: java.io.IOException: Failed to close WALFactory 2024-11-14T21:52:19,161 DEBUG [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1387): Shutdown / close exception details: java.io.IOException: Failed to close WALFactory at org.apache.hadoop.hbase.wal.WALFactory.close(WALFactory.java:308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.shutdownWAL(HRegionServer.java:1380) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:990) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Suppressed: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1731) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.listPaths(DFSClient.java:1717) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.listStatusInternal(DistributedFileSystem.java:1112) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.access$600(DistributedFileSystem.java:148) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1187) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$24.doCall(DistributedFileSystem.java:1184) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.listStatus(DistributedFileSystem.java:1194) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystem.listStatus(FileSystem.java:2078) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystem.listStatus(FileSystem.java:2122) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.util.CommonFSUtils.listStatus(CommonFSUtils.java:602) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.getFiles(AbstractFSWAL.java:1155) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.close(AbstractFSWAL.java:1235) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractFSWALProvider.close0(AbstractFSWALProvider.java:170) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.close(AbstractWALProvider.java:304) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.close(WALFactory.java:302) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.shutdownWAL(HRegionServer.java:1380) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:990) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:19,162 DEBUG [RS:0;ef788a4cade8:35921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:52:19,162 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:52:19,162 INFO [RS:0;ef788a4cade8:35921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:52:19,162 INFO [RS:0;ef788a4cade8:35921 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T21:52:19,162 INFO [RS:0;ef788a4cade8:35921 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:52:19,163 INFO [RS:0;ef788a4cade8:35921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35921 2024-11-14T21:52:19,163 INFO [regionserver/ef788a4cade8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T21:52:19,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:52:19,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef788a4cade8,35921,1731621128264 2024-11-14T21:52:19,165 INFO [RS:0;ef788a4cade8:35921 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:52:19,166 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef788a4cade8,35921,1731621128264] 2024-11-14T21:52:19,169 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef788a4cade8,35921,1731621128264 already deleted, retry=false 2024-11-14T21:52:19,169 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef788a4cade8,35921,1731621128264 expired; onlineServers=0 2024-11-14T21:52:19,169 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,52351,1731621131811' ***** 2024-11-14T21:52:19,169 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T21:52:19,169 INFO [M:1;ef788a4cade8:52351 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:52:19,169 INFO [M:1;ef788a4cade8:52351 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:52:19,169 DEBUG [M:1;ef788a4cade8:52351 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T21:52:19,170 DEBUG [M:1;ef788a4cade8:52351 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T21:52:19,170 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T21:52:19,170 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.large.0-1731621136496 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.large.0-1731621136496,5,FailOnTimeoutGroup] 2024-11-14T21:52:19,170 DEBUG [master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.small.0-1731621136496 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:52351:becomeActiveMaster-HFileCleaner.small.0-1731621136496,5,FailOnTimeoutGroup] 2024-11-14T21:52:19,170 INFO [M:1;ef788a4cade8:52351 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:52351 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T21:52:19,170 INFO [M:1;ef788a4cade8:52351 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:52:19,170 DEBUG [M:1;ef788a4cade8:52351 {}] master.HMaster(1795): Stopping service threads 2024-11-14T21:52:19,170 INFO [M:1;ef788a4cade8:52351 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T21:52:19,170 INFO [M:1;ef788a4cade8:52351 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:52:19,170 INFO [M:1;ef788a4cade8:52351 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T21:52:19,171 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T21:52:19,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:52:19,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:52:19,172 DEBUG [M:1;ef788a4cade8:52351 {}] zookeeper.ZKUtil(347): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T21:52:19,172 WARN [M:1;ef788a4cade8:52351 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T21:52:19,173 INFO [M:1;ef788a4cade8:52351 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/.lastflushedseqids 2024-11-14T21:52:19,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40079 is added to blk_1073741842_1019 (size=99) 2024-11-14T21:52:19,190 INFO [M:1;ef788a4cade8:52351 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:52:19,190 INFO [M:1;ef788a4cade8:52351 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:52:19,190 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:52:19,191 INFO [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:19,191 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:52:19,191 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:52:19,191 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:52:19,191 INFO [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65 B heapSize=1.14 KB 2024-11-14T21:52:19,194 ERROR [AsyncFSWAL-0-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:52:19,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:19,269 INFO [RS:0;ef788a4cade8:35921 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:52:19,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35921-0x100ac6d169d0001, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:52:19,269 INFO [RS:0;ef788a4cade8:35921 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,35921,1731621128264; zookeeper connection closed. 2024-11-14T21:52:19,269 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 2 master(s) and 2 regionserver(s) complete 2024-11-14T21:52:24,451 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:52:24,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:52:37,803 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:53:07,803 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:1;ef788a4cade8:52351 164 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 6 Waited count: 7 Waiting on java.lang.ref.ReferenceQueue$Lock@1f2a738a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 9 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@306bc87a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 860 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.CountDownLatch$Sync@d62f5b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 147 Waited count: 205 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:92) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) app//org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.lang.ref.ReferenceQueue$Lock@7194b96c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1f52973c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 127 (ForkJoinPool-2-worker-1): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.ForkJoinPool@1d897d5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 128 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 151 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7dcdc20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58b7cc5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (HRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (RSDelayedStart-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e022181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@d5c1683): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (HRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f913817 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 7677 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665bcd71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 290 (HRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 297 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f61805a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 344 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@604512c2): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 345 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 346 (pool-125-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 347 (qtp450110968-347): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 348 (qtp450110968-348): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (qtp450110968-349): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 350 (qtp450110968-350): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (qtp450110968-351-acceptor-0@7445fad8-ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (qtp450110968-352): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (qtp450110968-353): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (qtp450110968-354): State: 
TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Session-HouseKeeper-2093133b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (pool-126-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 129 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a8c5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 366 (IPC Server idle connection scanner for port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 368 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 371 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@66ed21f0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 372 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 
(org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2c5dcaa2): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 7131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (Block report processor): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2043c6bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 367 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 364 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 374 (IPC Server handler 0 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 166 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 375 (IPC Server handler 1 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 376 (IPC Server handler 2 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 377 (IPC Server handler 3 on default port 36589): State: TIMED_WAITING Blocked count: 6 Waited count: 159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 378 (IPC Server handler 4 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 379 (pool-131-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2baab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@704eea15): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@50177aa1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@239b419c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (CacheReplicationMonitor(516992859)): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 395 (pool-137-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (qtp1990604458-396): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (qtp1990604458-397-acceptor-0@5988a3f5-ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (qtp1990604458-398): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (qtp1990604458-399): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Session-HouseKeeper-290b945b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4f43a893): State: TIMED_WAITING Blocked count: 0 Waited count: 144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 405 (IPC Server idle connection scanner for port 37595): State: TIMED_WAITING Blocked count: 1 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 407 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Command processor): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f93961c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 411 (BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 73 Waited count: 98 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (pool-139-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins): State: TIMED_WAITING Blocked count: 48 Waited count: 49 
Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 394 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@247ec4bb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (IPC Parameter Sending Thread for localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 403 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 415 (IPC Server handler 0 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 416 (IPC Server handler 1 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 417 (IPC Server handler 2 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 418 (IPC Server handler 3 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 419 (IPC Server handler 4 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 424 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 425 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 430 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (pool-134-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (java.util.concurrent.ThreadPoolExecutor$Worker@33026071[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 443 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 442 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 444 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49891): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 441 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 445 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 446 (SyncThread:0): State: WAITING Blocked count: 4 Waited count: 241 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49e3b6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 447 (ProcessThread(sid:0 cport:49891):): State: WAITING Blocked count: 0 Waited count: 272 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@562d6fcb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 448 
(RequestThrottler): State: WAITING Blocked count: 0 Waited count: 276 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25ad8a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 449 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (LeaseRenewer:jenkins@localhost:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (HMaster-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 464 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 465 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 467 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 468 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 469 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 473 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 488 (HRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (HRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c4226a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (HMaster-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b2a524e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 585 (HRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@521f2f88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (MyRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (Time-limited test-SendThread(127.0.0.1:49891)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 621 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12fcea83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 625 (zk-event-processor-pool-0): State: WAITING Blocked count: 8 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ac4dc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 627 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5c5dc51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 628 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7715d271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 629 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@24896e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 630 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@757d3902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 631 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 632 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 633 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ee5bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 634 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@585cd1e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 635 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44f556c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 636 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50271f5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 637 (M:1;ef788a4cade8:52351): State: TIMED_WAITING Blocked count: 13 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$812/0x00007f97c4b96948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 649 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 650 (MyRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 651 (HMaster-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 652 (MyRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 653 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 668 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 691 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 699 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 700 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871d07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 706 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fd78196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 711 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@641d203c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 729 (AsyncFSWAL-1-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec5cb3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 730 (Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 731 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-14T21:53:37,803 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:54:07,804 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:1;ef788a4cade8:52351 160 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 6 Waited count: 7 Waiting on java.lang.ref.ReferenceQueue$Lock@1f2a738a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 9 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@306bc87a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 1459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.CountDownLatch$Sync@288ba8e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 147 Waited count: 206 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:92) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) app//org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.lang.ref.ReferenceQueue$Lock@7194b96c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1f52973c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 127 (ForkJoinPool-2-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 151 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7dcdc20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58b7cc5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (HRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (RSDelayedStart-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e022181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@d5c1683): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (HRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@608013af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f913817 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 13679 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665bcd71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 290 (HRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 297 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f61805a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 344 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@604512c2): State: TIMED_WAITING Blocked count: 0 Waited count: 265 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 345 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 346 (pool-125-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 347 (qtp450110968-347): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 348 (qtp450110968-348): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (qtp450110968-349): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 350 (qtp450110968-350): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (qtp450110968-351-acceptor-0@7445fad8-ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (qtp450110968-352): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (qtp450110968-353): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (qtp450110968-354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Session-HouseKeeper-2093133b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (pool-126-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 129 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a8c5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 
(Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 366 (IPC Server idle connection scanner for port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 368 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 371 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@66ed21f0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 372 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2c5dcaa2): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 13076 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (Block report processor): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2043c6bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 367 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 364 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 374 (IPC Server handler 0 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 375 (IPC Server handler 1 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 376 (IPC Server handler 2 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 377 (IPC Server handler 3 on default port 36589): State: TIMED_WAITING Blocked count: 6 Waited count: 219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 378 (IPC Server handler 4 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 222 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 379 (pool-131-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2baab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@704eea15): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@50177aa1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@239b419c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (CacheReplicationMonitor(516992859)): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 395 (pool-137-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (qtp1990604458-396): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (qtp1990604458-397-acceptor-0@5988a3f5-ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (qtp1990604458-398): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (qtp1990604458-399): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Session-HouseKeeper-290b945b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4f43a893): State: TIMED_WAITING Blocked count: 0 Waited count: 264 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 405 (IPC Server idle connection scanner for port 37595): State: TIMED_WAITING Blocked count: 1 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 407 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Command processor): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f93961c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 411 (BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 93 Waited count: 138 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (pool-139-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins): State: TIMED_WAITING Blocked count: 68 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 394 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@247ec4bb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (IPC Parameter Sending Thread for localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 403 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 415 (IPC Server handler 0 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 416 (IPC Server handler 1 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 417 (IPC Server handler 2 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 418 (IPC Server handler 3 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 419 (IPC Server handler 4 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 424 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 425 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 430 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (pool-134-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (java.util.concurrent.ThreadPoolExecutor$Worker@33026071[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 443 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 442 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 444 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49891): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 441 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 445 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 446 (SyncThread:0): State: WAITING Blocked count: 4 Waited count: 246 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49e3b6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 447 (ProcessThread(sid:0 cport:49891):): State: WAITING Blocked count: 0 Waited count: 277 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@562d6fcb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 448 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 281 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25ad8a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 449 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (HMaster-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 464 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 465 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 467 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 468 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 469 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 473 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 488 (HRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (HRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c4226a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (HMaster-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b2a524e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 585 (HRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@521f2f88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (MyRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (Time-limited test-SendThread(127.0.0.1:49891)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 621 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12fcea83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 625 (zk-event-processor-pool-0): State: WAITING Blocked count: 8 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ac4dc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 627 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5c5dc51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 628 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7715d271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 629 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=52351): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@24896e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 630 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@757d3902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 631 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 632 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 633 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ee5bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 634 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@585cd1e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 635 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44f556c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 636 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50271f5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 637 (M:1;ef788a4cade8:52351): State: TIMED_WAITING Blocked count: 13 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$812/0x00007f97c4b96948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 649 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 1269 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 650 (MyRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 651 (HMaster-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 652 (MyRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 653 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 668 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 691 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 700 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871d07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 706 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fd78196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 711 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@641d203c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 729 (AsyncFSWAL-1-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec5cb3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 731 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-14T21:54:37,804 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:55:07,804 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:1;ef788a4cade8:52351 159 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 6 Waited count: 7 Waiting on java.lang.ref.ReferenceQueue$Lock@1f2a738a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 9 Waited count: 13 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@306bc87a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 2059 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 21 Waiting on java.util.concurrent.CountDownLatch$Sync@48f71ae6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 147 Waited count: 207 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:92) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) app//org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.lang.ref.ReferenceQueue$Lock@7194b96c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1f52973c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 151 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7dcdc20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 234 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58b7cc5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (HRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (RSDelayedStart-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e022181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@d5c1683): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (HRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@608013af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f913817 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 19682 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665bcd71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 290 (HRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 297 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f61805a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 344 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@604512c2): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 345 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 346 (pool-125-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 347 (qtp450110968-347): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 348 (qtp450110968-348): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (qtp450110968-349): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 350 (qtp450110968-350): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (qtp450110968-351-acceptor-0@7445fad8-ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (qtp450110968-352): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (qtp450110968-353): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (qtp450110968-354): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Session-HouseKeeper-2093133b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (pool-126-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 129 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a8c5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 366 (IPC Server idle 
connection scanner for port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 368 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 371 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@66ed21f0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 372 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2c5dcaa2): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 
(MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 19026 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (Block report processor): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2043c6bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 367 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 364 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 374 (IPC Server handler 0 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 375 (IPC Server handler 1 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 279 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 376 (IPC Server handler 2 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 279 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 377 (IPC Server handler 3 on default port 36589): State: TIMED_WAITING Blocked count: 6 Waited count: 279 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 378 (IPC Server handler 4 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 379 (pool-131-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2baab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 96 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 
(org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@704eea15): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@50177aa1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@239b419c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (CacheReplicationMonitor(516992859)): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 395 (pool-137-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (qtp1990604458-396): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (qtp1990604458-397-acceptor-0@5988a3f5-ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (qtp1990604458-398): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (qtp1990604458-399): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Session-HouseKeeper-290b945b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4f43a893): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 405 (IPC Server idle connection scanner for port 37595): State: TIMED_WAITING Blocked count: 1 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 407 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Command processor): State: WAITING Blocked count: 0 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f93961c Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 411 (BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 113 Waited count: 178 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (pool-139-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins): State: TIMED_WAITING Blocked count: 88 Waited count: 89 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 394 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@247ec4bb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (IPC 
Parameter Sending Thread for localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 403 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 415 (IPC Server handler 0 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 206 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 416 (IPC Server handler 1 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 417 (IPC Server handler 2 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 203 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 418 (IPC Server handler 3 on default 
port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 419 (IPC Server handler 4 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 198 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 424 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 425 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 430 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (pool-134-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (java.util.concurrent.ThreadPoolExecutor$Worker@33026071[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 443 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 442 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 444 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49891): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 441 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 445 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 96 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 446 (SyncThread:0): State: WAITING Blocked count: 4 Waited count: 250 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49e3b6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 447 (ProcessThread(sid:0 cport:49891):): State: WAITING Blocked count: 0 Waited count: 281 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@562d6fcb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 448 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 285 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25ad8a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 449 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (HMaster-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (NIOWorkerThread-3): State: WAITING Blocked count: 0 
Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 464 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 465 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 467 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 468 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 469 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 473 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 488 (HRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (HRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c4226a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (HMaster-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b2a524e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 585 (HRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@521f2f88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 594 (MyRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (Time-limited test-SendThread(127.0.0.1:49891)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 621 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12fcea83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 625 (zk-event-processor-pool-0): State: 
WAITING Blocked count: 8 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ac4dc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 627 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5c5dc51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 628 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7715d271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 629 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@24896e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 630 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@757d3902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 631 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 632 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 633 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ee5bb51 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 634 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@585cd1e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 635 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44f556c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 636 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50271f5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 637 (M:1;ef788a4cade8:52351): State: TIMED_WAITING Blocked count: 13 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) 
app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$812/0x00007f97c4b96948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 649 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 1869 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 650 (MyRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 651 (HMaster-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 652 (MyRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 653 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 668 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 691 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 700 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871d07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 706 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fd78196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 711 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@641d203c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 729 (AsyncFSWAL-1-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec5cb3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 731 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-14T21:55:37,805 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:56:07,805 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:1;ef788a4cade8:52351 159 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 6 Waited count: 7 Waiting on java.lang.ref.ReferenceQueue$Lock@1f2a738a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 9 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@306bc87a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 2659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.CountDownLatch$Sync@4ac11f96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 147 Waited count: 208 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:92) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) app//org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.lang.ref.ReferenceQueue$Lock@7194b96c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1f52973c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 151 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7dcdc20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 306 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58b7cc5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (HRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (RSDelayedStart-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e022181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@d5c1683): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (HRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@608013af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f913817 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 25687 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665bcd71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 290 (HRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 297 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f61805a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 344 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@604512c2): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 345 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 346 (pool-125-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 347 (qtp450110968-347): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 348 (qtp450110968-348): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (qtp450110968-349): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 350 (qtp450110968-350): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (qtp450110968-351-acceptor-0@7445fad8-ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (qtp450110968-352): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (qtp450110968-353): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (qtp450110968-354): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Session-HouseKeeper-2093133b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (pool-126-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 129 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a8c5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 366 (IPC Server idle 
connection scanner for port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 368 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 371 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@66ed21f0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 372 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2c5dcaa2): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 85 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 
(MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 24978 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (Block report processor): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2043c6bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 367 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 364 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 374 (IPC Server handler 0 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 375 (IPC Server handler 1 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 376 (IPC Server handler 2 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 377 (IPC Server handler 3 on default port 36589): State: TIMED_WAITING Blocked count: 6 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 378 (IPC Server handler 4 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 379 (pool-131-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2baab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 
(org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@704eea15): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@50177aa1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@239b419c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (CacheReplicationMonitor(516992859)): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 395 (pool-137-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (qtp1990604458-396): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (qtp1990604458-397-acceptor-0@5988a3f5-ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (qtp1990604458-398): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (qtp1990604458-399): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Session-HouseKeeper-290b945b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4f43a893): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 405 (IPC Server idle connection scanner for port 37595): State: TIMED_WAITING Blocked count: 1 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 407 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Command processor): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f93961c Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 411 (BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 133 Waited count: 218 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (pool-139-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins): State: TIMED_WAITING Blocked count: 108 Waited count: 109 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 394 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@247ec4bb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (IPC 
Parameter Sending Thread for localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 403 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 415 (IPC Server handler 0 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 416 (IPC Server handler 1 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 417 (IPC Server handler 2 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 418 (IPC Server handler 3 on default 
port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 419 (IPC Server handler 4 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 424 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 425 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 430 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (pool-134-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (java.util.concurrent.ThreadPoolExecutor$Worker@33026071[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 443 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 442 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 444 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49891): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 441 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 445 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 446 (SyncThread:0): State: WAITING Blocked count: 4 Waited count: 255 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49e3b6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 447 (ProcessThread(sid:0 cport:49891):): State: WAITING Blocked count: 0 Waited count: 286 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@562d6fcb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 448 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 290 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25ad8a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 449 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (HMaster-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (NIOWorkerThread-3): State: WAITING Blocked count: 0 
Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 464 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 465 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 467 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 468 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 469 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 473 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 488 (HRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (HRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c4226a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (HMaster-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b2a524e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 585 (HRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@521f2f88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 594 (MyRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (Time-limited test-SendThread(127.0.0.1:49891)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 621 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12fcea83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 625 (zk-event-processor-pool-0): State: 
WAITING Blocked count: 8 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ac4dc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 627 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5c5dc51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 628 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7715d271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 629 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@24896e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 630 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@757d3902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 631 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 632 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 633 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ee5bb51 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 634 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@585cd1e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 635 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44f556c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 636 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50271f5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 637 (M:1;ef788a4cade8:52351): State: TIMED_WAITING Blocked count: 13 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) 
app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$812/0x00007f97c4b96948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 649 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 2469 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 650 (MyRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 651 (HMaster-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 652 (MyRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 653 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 668 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 691 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 700 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871d07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 706 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fd78196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 711 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@641d203c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 729 (AsyncFSWAL-1-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec5cb3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 731 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-14T21:56:37,805 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:57:01,149 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=4, reused chunk count=5, reuseRatio=55.56% 2024-11-14T21:57:01,151 DEBUG [master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-14T21:57:07,806 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T21:57:08,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:09,521 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:57:09,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:19,195 DEBUG [M:1;ef788a4cade8:52351 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621139190Disabling compacts and flushes for region at 1731621139190Disabling writes for close at 1731621139191 (+1 ms)Obtaining lock to block concurrent updates at 1731621139191Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621139191Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=65, getHeapSize=1104, getOffHeapSize=0, getCellsCount=1 at 1731621139192 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731621439194 (+300002 ms) 2024-11-14T21:57:19,195 WARN [M:1;ef788a4cade8:52351 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-14T21:57:19,197 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:57:19,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 2024-11-14T21:57:19,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:57:19,198 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:57:19,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 2024-11-14T21:57:19,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:1;ef788a4cade8:52351 161 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 6 Waited count: 7 Waiting on java.lang.ref.ReferenceQueue$Lock@1f2a738a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 9 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@306bc87a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.CountDownLatch$Sync@3c280169 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 147 Waited count: 209 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:92) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) 
java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) app//org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.lang.ref.ReferenceQueue$Lock@7194b96c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1f52973c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 151 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7dcdc20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58b7cc5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (HRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (RSDelayedStart-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e022181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/ef788a4cade8:61512:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@d5c1683): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) 
app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (HRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@608013af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f913817 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 31688 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665bcd71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 290 (HRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 297 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f61805a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 344 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@604512c2): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 345 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 346 (pool-125-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 347 (qtp450110968-347): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 348 (qtp450110968-348): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (qtp450110968-349): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 350 (qtp450110968-350): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (qtp450110968-351-acceptor-0@7445fad8-ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:41375}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (qtp450110968-352): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 353 (qtp450110968-353): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (qtp450110968-354): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Session-HouseKeeper-2093133b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (pool-126-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 129 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a8c5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 366 (IPC Server idle connection scanner for port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 368 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 371 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@66ed21f0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 372 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2c5dcaa2): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 30922 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (Block report processor): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2043c6bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 367 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 364 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 374 (IPC Server handler 0 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 375 (IPC Server handler 1 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 376 (IPC Server handler 2 on default port 36589): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 377 (IPC Server handler 3 on default port 36589): State: TIMED_WAITING Blocked count: 6 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 378 (IPC Server handler 4 on default port 36589): State: TIMED_WAITING Blocked count: 1 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 379 (pool-131-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2baab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@704eea15): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 383 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@50177aa1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@239b419c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (CacheReplicationMonitor(516992859)): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 395 (pool-137-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (qtp1990604458-396): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f97c4445da8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 397 (qtp1990604458-397-acceptor-0@5988a3f5-ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:46177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (qtp1990604458-398): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 399 (qtp1990604458-399): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Session-HouseKeeper-290b945b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4f43a893): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 405 (IPC Server idle 
connection scanner for port 37595): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 407 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Command processor): State: WAITING Blocked count: 0 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f93961c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 411 (BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 153 Waited count: 258 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (pool-139-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins): State: TIMED_WAITING Blocked count: 128 Waited count: 129 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 394 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@247ec4bb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (IPC Parameter Sending Thread for localhost/127.0.0.1:36589): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 403 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 415 (IPC Server handler 0 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 416 (IPC Server handler 1 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 417 (IPC Server handler 2 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 418 (IPC Server handler 3 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 419 (IPC Server handler 4 on default port 37595): State: TIMED_WAITING Blocked count: 0 Waited count: 319 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 424 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 425 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 430 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (pool-134-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (java.util.concurrent.ThreadPoolExecutor$Worker@33026071[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 443 (NIOServerCxnFactory.SelectorThread-1): State: 
RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 442 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 444 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49891): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 441 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 445 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 446 (SyncThread:0): State: WAITING Blocked count: 4 Waited count: 259 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49e3b6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 447 (ProcessThread(sid:0 cport:49891):): State: WAITING Blocked count: 0 Waited count: 290 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@562d6fcb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 448 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 294 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25ad8a21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 449 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (HMaster-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 460 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 464 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 465 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 467 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 468 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 469 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 42 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 473 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 474 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72c65cb4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 488 (HRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (HRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c4226a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (HMaster-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b2a524e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 585 (HRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 592 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@521f2f88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (MyRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 618 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 620 (Time-limited test-SendThread(127.0.0.1:49891)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 621 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12fcea83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 625 (zk-event-processor-pool-0): State: WAITING Blocked count: 8 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ac4dc4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 627 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5c5dc51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 628 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7715d271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 629 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@24896e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 630 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@757d3902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 631 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 632 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473a7f9d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 633 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ee5bb51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 634 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@585cd1e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 635 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44f556c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 636 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=52351): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50271f5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 637 (M:1;ef788a4cade8:52351): State: TIMED_WAITING Blocked count: 13 Waited count: 88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$846/0x00007f97c4bc5a78.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 649 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3068 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 650 (MyRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 651 (HMaster-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 652 (MyRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 653 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 668 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 691 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 700 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871d07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 706 (regionserver/ef788a4cade8:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fd78196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 711 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@641d203c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 729 (AsyncFSWAL-1-hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData-prefix:ef788a4cade8,52351,1731621131811): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ec5cb3c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 732 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 733 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 734 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$838/0x00007f97c4bb7800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:23,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T21:57:24,197 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-14T21:57:24,198 INFO [M:1;ef788a4cade8:52351 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T21:57:24,198 INFO [M:1;ef788a4cade8:52351 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:52351 2024-11-14T21:57:24,198 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:57:24,199 INFO [M:1;ef788a4cade8:52351 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:24,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:24,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:24,303 INFO [M:1;ef788a4cade8:52351 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:24,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:52351-0x100ac6d169d0003, quorum=127.0.0.1:49891, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:24,307 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T21:57:24,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46eae375{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:24,310 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4efd02b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:24,310 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:24,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1873df08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:24,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a9c440{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:24,312 WARN [BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T21:57:24,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
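[editor's note] The ERROR above reports that the async WAL writer did not close within the 5-second wait and points at the config key "hbase.wal.async.wait.on.shutdown.seconds" as the knob to raise. As a minimal sketch (not taken from this test run; the value 30 is an arbitrary illustrative choice), bumping that wait on an HBase Configuration could look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch, assuming the caller can hand this Configuration to the
// cluster/test before startup. The key is taken from the ERROR message above;
// 30 seconds is only an illustrative value, not a recommended default.
public final class WalShutdownWaitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    System.out.println("wal shutdown wait = "
        + conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5) + "s");
  }
}
```

Note that in this particular run the close is stuck on a DFSClient that is already shut down ("java.io.IOException: Filesystem closed"), so a longer wait alone would not necessarily let the writer finish.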
2024-11-14T21:57:24,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T21:57:24,312 WARN [BP-371475406-172.17.0.2-1731621127055 heartbeating to localhost/127.0.0.1:36589 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-371475406-172.17.0.2-1731621127055 (Datanode Uuid 6b441693-ca33-4907-897a-89104b4b6a9d) service to localhost/127.0.0.1:36589 2024-11-14T21:57:24,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data1/current/BP-371475406-172.17.0.2-1731621127055 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:24,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/cluster_1a016d2a-59f3-dbee-2f24-f11810945b1c/data/data2/current/BP-371475406-172.17.0.2-1731621127055 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:24,314 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T21:57:24,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39f213d6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:24,323 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fcf9162{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:24,323 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:24,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e3e5b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:24,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@209802d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:24,333 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.TestRegionServerReportForDuty#testReportForDutyWithMasterChange Thread=62 (was 50) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MyRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$838/0x00007f97c4bb7800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MyRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:36589 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MyRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=356 (was 332) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=339 (was 763), ProcessCount=11 (was 11), AvailableMemoryMB=3065 (was 4112) 2024-11-14T21:57:24,341 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.TestRegionServerReportForDuty#testReportForDutyGetsRejectedByMasterWhenConfiguredToRejectDecommissionedHosts Thread=63, OpenFileDescriptor=356, MaxFileDescriptor=1048576, SystemLoadAverage=339, ProcessCount=11, AvailableMemoryMB=3063 2024-11-14T21:57:24,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir so I do NOT create it in target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628 2024-11-14T21:57:24,355 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.log.dir Erasing configuration value by system value. 2024-11-14T21:57:24,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.tmp.dir so I do NOT create it in target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628 2024-11-14T21:57:24,356 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c9ddaa9-300f-24a5-4986-3711490f80fe/hadoop.tmp.dir Erasing configuration value by system value. 
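[editor's note] The ResourceChecker summary above compares thread and file-descriptor counts before and after the test (Thread=62 was 50, OpenFileDescriptor=356 was 332) and lists each potentially hanging thread with its name, state, and stack. A minimal, self-contained sketch of taking that kind of thread snapshot with the standard JDK API (this is not the HBase ResourceChecker implementation, just an illustration of the underlying mechanism) is:

```java
import java.util.Map;

// Snapshot all live threads and their stacks, the same kind of data the
// "Potentially hanging thread" listing above is built from.
public final class ThreadSnapshot {
  public static void main(String[] args) {
    Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
    System.out.println("Live threads: " + all.size());
    all.forEach((thread, stack) -> {
      System.out.printf("Thread %d (%s): State: %s%n",
          thread.getId(), thread.getName(), thread.getState());
      for (StackTraceElement frame : stack) {
        System.out.println("    " + frame);
      }
    });
  }
}
```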
2024-11-14T21:57:24,356 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752, deleteOnExit=true 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/test.cache.data in system properties and HBase conf 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir in system properties and HBase conf 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T21:57:24,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T21:57:24,356 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/nfs.dump.dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/java.io.tmpdir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T21:57:24,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T21:57:24,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:24,492 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:24,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:24,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:24,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T21:57:24,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:24,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6315fee5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:24,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d39a7cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:24,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c1c27ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/java.io.tmpdir/jetty-localhost-35347-hadoop-hdfs-3_4_1-tests_jar-_-any-2760888455391626585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:24,685 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35919168{HTTP/1.1, (http/1.1)}{localhost:35347} 2024-11-14T21:57:24,685 INFO [Time-limited test {}] server.Server(415): Started @332126ms 2024-11-14T21:57:24,868 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:24,881 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:24,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:24,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:24,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:57:24,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b15505e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:24,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e7a99c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:25,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@477b7dda{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/java.io.tmpdir/jetty-localhost-35671-hadoop-hdfs-3_4_1-tests_jar-_-any-9545986105017910070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:25,058 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75cff82f{HTTP/1.1, (http/1.1)}{localhost:35671} 2024-11-14T21:57:25,058 INFO [Time-limited test {}] server.Server(415): Started @332499ms 2024-11-14T21:57:25,060 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T21:57:25,200 WARN [Thread-378 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/data/data1/current/BP-912554017-172.17.0.2-1731621444390/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:25,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:25,215 WARN [Thread-379 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/data/data2/current/BP-912554017-172.17.0.2-1731621444390/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:25,238 WARN [Thread-365 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T21:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd50afdf980c7bc2 with lease ID 0x31842b4b4be7df2f: Processing first storage report for DS-28990a7a-36c2-4424-a823-96ea0ab3631d from datanode DatanodeRegistration(127.0.0.1:39179, datanodeUuid=12604faa-dd5a-4367-b93e-4a85fb835e72, infoPort=42213, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=341677410;c=1731621444390) 2024-11-14T21:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd50afdf980c7bc2 with lease ID 0x31842b4b4be7df2f: from storage DS-28990a7a-36c2-4424-a823-96ea0ab3631d node DatanodeRegistration(127.0.0.1:39179, datanodeUuid=12604faa-dd5a-4367-b93e-4a85fb835e72, infoPort=42213, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=341677410;c=1731621444390), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd50afdf980c7bc2 with lease ID 0x31842b4b4be7df2f: Processing first storage report for DS-626b279f-b970-4841-86d2-b5b4082bb32b from datanode DatanodeRegistration(127.0.0.1:39179, datanodeUuid=12604faa-dd5a-4367-b93e-4a85fb835e72, infoPort=42213, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=341677410;c=1731621444390) 2024-11-14T21:57:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd50afdf980c7bc2 with lease ID 0x31842b4b4be7df2f: from storage DS-626b279f-b970-4841-86d2-b5b4082bb32b node DatanodeRegistration(127.0.0.1:39179, datanodeUuid=12604faa-dd5a-4367-b93e-4a85fb835e72, infoPort=42213, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=341677410;c=1731621444390), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:57:25,338 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628 2024-11-14T21:57:25,344 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/zookeeper_0, clientPort=56659, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/zookeeper_0/version-2, dataDirSize=457 
dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T21:57:25,349 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56659 2024-11-14T21:57:25,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741825_1001 (size=7) 2024-11-14T21:57:25,370 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7 with version=8 2024-11-14T21:57:25,373 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:63118 server-side Connection retries=45 2024-11-14T21:57:25,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,373 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:25,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,373 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:25,374 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:57:25,374 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:25,375 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:63118 2024-11-14T21:57:25,379 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:63118 connecting to ZooKeeper ensemble=127.0.0.1:56659 2024-11-14T21:57:25,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:631180x0, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:25,399 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKWatcher(635): master:63118-0x100ac71eeff0000 connected 2024-11-14T21:57:25,432 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,434 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:25,438 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7, hbase.cluster.distributed=false 2024-11-14T21:57:25,440 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:25,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=63118 2024-11-14T21:57:25,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=63118 2024-11-14T21:57:25,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=63118 2024-11-14T21:57:25,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=63118 2024-11-14T21:57:25,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=63118 2024-11-14T21:57:25,446 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:57:25,447 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:25,451 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38561 2024-11-14T21:57:25,452 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38561 connecting to ZooKeeper ensemble=127.0.0.1:56659 2024-11-14T21:57:25,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385610x0, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:25,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:385610x0, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:25,461 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:57:25,462 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38561-0x100ac71eeff0001 connected 2024-11-14T21:57:25,463 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:57:25,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:57:25,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:25,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-14T21:57:25,470 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38561 2024-11-14T21:57:25,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38561 2024-11-14T21:57:25,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-14T21:57:25,472 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38561 2024-11-14T21:57:25,488 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef788a4cade8:63118 2024-11-14T21:57:25,490 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,63118,1731621445372 2024-11-14T21:57:25,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:25,492 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:25,493 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef788a4cade8,63118,1731621445372 2024-11-14T21:57:25,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:25,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:25,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:25,496 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:57:25,496 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,63118,1731621445372 from backup master directory 2024-11-14T21:57:25,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,63118,1731621445372 2024-11-14T21:57:25,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:25,498 WARN [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
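The "Set watcher on znode that does not yet exist, /hbase/running" entries and the NodeCreated/NodeChildrenChanged events above are the standard ZooKeeper watch pattern: call exists() so a watch is armed even before the znode is created. A minimal sketch of that pattern with the plain ZooKeeper client; the session timeout and the way events are handled here are illustrative, not taken from this run:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini-cluster's client port (56659 in the log above).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56659", 30000, event -> { });

    // exists() registers a watch even when the znode is not there yet, which is
    // what ZKUtil(113) reports as "Set watcher on znode that does not yet exist".
    Watcher runningWatcher = (WatchedEvent event) ->
        System.out.println("Event " + event.getType() + " on " + event.getPath());
    if (zk.exists("/hbase/running", runningWatcher) == null) {
      System.out.println("/hbase/running not created yet; watch is armed");
    }
    zk.close();
  }
}
```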
2024-11-14T21:57:25,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:25,498 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,63118,1731621445372 2024-11-14T21:57:25,504 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/hbase.id] with ID: cf540142-a243-4988-89db-1fa5717cf8dd 2024-11-14T21:57:25,504 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/.tmp/hbase.id 2024-11-14T21:57:25,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741826_1002 (size=42) 2024-11-14T21:57:25,912 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/.tmp/hbase.id]:[hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/hbase.id] 2024-11-14T21:57:25,943 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:25,944 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:57:25,946 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-14T21:57:25,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:25,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:25,975 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:25,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741827_1003 (size=196) 2024-11-14T21:57:26,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:26,378 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:57:26,379 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:57:26,379 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:26,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741828_1004 (size=1189) 2024-11-14T21:57:26,475 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:26,802 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store 2024-11-14T21:57:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741829_1005 (size=34) 2024-11-14T21:57:26,827 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:26,828 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:26,828 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:26,828 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:26,828 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T21:57:26,828 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
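The 'master:store' descriptor logged above (an in-memory 'info' family with ROW_INDEX_V1 encoding, a ROWCOL bloom filter and an 8 KB block size, plus 'proc', 'rs' and 'state' families) is the kind of definition the HBase builder API expresses directly. A sketch of building an equivalent descriptor for the 'info' family only, assuming the HBase 2/3 client classes are on the classpath; the table name below is illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family from the log: VERSIONS=3, ROW_INDEX_V1,
    // BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8 KB.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store")) // illustrative name
        .setColumnFamily(info)
        .build();
    System.out.println(table);
  }
}
```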
2024-11-14T21:57:26,828 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:26,828 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621446827Disabling compacts and flushes for region at 1731621446827Disabling writes for close at 1731621446828 (+1 ms)Writing region close event to WAL at 1731621446828Closed at 1731621446828 2024-11-14T21:57:26,829 WARN [master/ef788a4cade8:63118:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/.initializing 2024-11-14T21:57:26,829 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/WALs/ef788a4cade8,63118,1731621445372 2024-11-14T21:57:26,831 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:57:26,834 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C63118%2C1731621445372, suffix=, logDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/WALs/ef788a4cade8,63118,1731621445372, archiveDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/oldWALs, maxLogs=10 2024-11-14T21:57:26,853 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/WALs/ef788a4cade8,63118,1731621445372/ef788a4cade8%2C63118%2C1731621445372.1731621446835, exclude list is [], retry=0 2024-11-14T21:57:26,857 DEBUG [HRegionServer-EventLoopGroup-9-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39179,DS-28990a7a-36c2-4424-a823-96ea0ab3631d,DISK] 2024-11-14T21:57:26,861 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/WALs/ef788a4cade8,63118,1731621445372/ef788a4cade8%2C63118%2C1731621445372.1731621446835 2024-11-14T21:57:26,861 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213)] 2024-11-14T21:57:26,861 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:57:26,862 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:26,862 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 
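The WAL configuration entry above reports blocksize=256 MB and rollsize=128 MB. A small arithmetic sketch of how the roll size is typically derived: a roll multiplier applied to the WAL block size. The key name hbase.regionserver.logroll.multiplier and its 0.5 default are stated here as assumptions about the usual configuration, not read from this run:

```java
public class WalRollSizeSketch {
  public static void main(String[] args) {
    long walBlockSize = 256L * 1024 * 1024;  // 256 MB, as logged by AbstractFSWAL(613)
    double rollMultiplier = 0.5;             // assumed hbase.regionserver.logroll.multiplier default
    long rollSize = (long) (walBlockSize * rollMultiplier);
    System.out.println(rollSize);            // 134217728 bytes = 128 MB, matching the logged rollsize
  }
}
```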
2024-11-14T21:57:26,862 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:57:26,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:26,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:26,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:57:26,869 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:26,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-11-14T21:57:26,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:57:26,871 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:26,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:26,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:57:26,874 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:26,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:26,875 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 
1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,875 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,876 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,878 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,878 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,879 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:57:26,881 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:26,883 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:26,884 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71031362, jitterRate=0.058449774980545044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:26,884 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621446862Initializing all the Stores at 1731621446863 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621446863Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621446864 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621446864Instantiating store for column family {NAME => 'state', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621446864Cleaning up temporary data from old regions at 1731621446878 (+14 ms)Region opened successfully at 1731621446884 (+6 ms) 2024-11-14T21:57:26,885 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:57:26,890 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5714c343, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:26,891 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T21:57:26,892 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:57:26,892 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:57:26,892 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:57:26,892 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T21:57:26,893 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T21:57:26,893 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T21:57:26,895 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
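Two of the numbers in the lines above can be reconstructed directly. FlushLargeStoresPolicy reports a 32.0 M lower bound because the injected flushSize of 134217728 bytes is divided by the region's four column families (info, proc, rs, state), giving the logged flushSizeLowerBound=33554432. The split policy's desiredMaxFileSize=71031362 is consistent with a 64 MiB base size scaled by the logged jitterRate of ~0.05845; the 64 MiB base is inferred from the arithmetic, not stated in the log. A sketch of both calculations:

```java
public class MasterStoreSizingSketch {
  public static void main(String[] args) {
    // Flush lower bound: flushSize / number of column families.
    long flushSize = 134_217_728L;            // "Injected flushSize=134217728" in the log
    int families = 4;                         // info, proc, rs, state
    System.out.println(flushSize / families); // 33554432 -> the logged 32.0 M lower bound

    // Split size with jitter: base * (1 + jitterRate), as a reconstruction.
    long baseMaxFileSize = 64L * 1024 * 1024;           // inferred 64 MiB base
    double jitterRate = 0.058449774980545044;           // from the log line
    System.out.println(Math.round(baseMaxFileSize * (1 + jitterRate))); // ~71031362, the logged value
  }
}
```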
2024-11-14T21:57:26,897 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:57:26,898 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:57:26,899 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:57:26,900 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:57:26,903 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:57:26,903 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:57:26,909 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:57:26,912 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:57:26,916 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:57:26,923 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:57:26,932 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:57:26,934 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:57:26,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:26,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,942 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(746): ClusterId : cf540142-a243-4988-89db-1fa5717cf8dd 2024-11-14T21:57:26,942 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:57:26,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:26,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,943 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,63118,1731621445372, sessionid=0x100ac71eeff0000, setting cluster-up flag (Was=false) 2024-11-14T21:57:26,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,957 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:57:26,957 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:57:26,958 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:57:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:26,963 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,63118,1731621445372 2024-11-14T21:57:26,975 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
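The repeated "Waiting for master to come online ..." lines come from the test polling until the master finishes initialization. A minimal sketch of that kind of poll-and-sleep loop; the condition passed in below is a stand-in for whatever check the test actually performs, not the real method:

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class WaitForMasterSketch {
  // Polls the supplied condition until it holds or the timeout expires.
  static boolean waitFor(BooleanSupplier condition, long timeoutMs, long intervalMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      System.out.println("Waiting for master to come online ...");
      TimeUnit.MILLISECONDS.sleep(intervalMs);
    }
    return condition.getAsBoolean();
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Stand-in condition: pretend the master comes online after ~1.5 seconds.
    boolean online = waitFor(() -> System.currentTimeMillis() - start > 1500, 10_000, 500);
    System.out.println("master online: " + online);
  }
}
```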
2024-11-14T21:57:26,979 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:57:26,979 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:57:26,979 DEBUG [RS:0;ef788a4cade8:38561 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b214921, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:26,981 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,63118,1731621445372 2024-11-14T21:57:26,984 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T21:57:26,988 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:26,989 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:57:26,989 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
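The StochasticLoadBalancer entry above lists the cost functions it loaded; conceptually the balancer scores a candidate cluster state as a multiplier-weighted combination of those per-function costs, which is why the log also reports the "sum of multiplier of cost functions". A simplified, illustrative sketch of that weighted scoring; the function names and numeric weights below are placeholders, not the balancer's actual defaults:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedCostSketch {
  public static void main(String[] args) {
    // cost function name -> {multiplier, normalized cost in [0,1]} (illustrative values)
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500, 0.10});
    costs.put("MoveCostFunction",            new double[] {7,   0.02});
    costs.put("ServerLocalityCostFunction",  new double[] {25,  0.30});

    double weighted = 0, totalMultiplier = 0;
    for (double[] c : costs.values()) {
      weighted += c[0] * c[1];      // multiplier * cost
      totalMultiplier += c[0];      // the "sum of multiplier of cost functions"
    }
    // Overall score: weighted sum scaled by the sum of multipliers.
    System.out.println(weighted / totalMultiplier);
  }
}
```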
2024-11-14T21:57:26,989 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,63118,1731621445372 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:63118, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:63118, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:63118, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:63118, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:63118, corePoolSize=10, maxPoolSize=10 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:63118, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:63118, corePoolSize=2, maxPoolSize=2 2024-11-14T21:57:26,991 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:63118, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:26,995 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:26,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T21:57:26,996 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:26,997 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => 
'0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T21:57:26,998 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621476998 2024-11-14T21:57:26,998 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:57:26,999 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef788a4cade8:38561 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:57:26,999 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:57:26,999 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:57:26,999 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:57:26,999 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T21:57:27,000 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,63118,1731621445372 with port=38561, startcode=1731621445446 2024-11-14T21:57:27,001 DEBUG [RS:0;ef788a4cade8:38561 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:57:27,010 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:57:27,010 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:57:27,010 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:57:27,010 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:57:27,011 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:57:27,011 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.large.0-1731621447011,5,FailOnTimeoutGroup] 2024-11-14T21:57:27,012 INFO [HMaster-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51167, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:57:27,012 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.small.0-1731621447011,5,FailOnTimeoutGroup] 2024-11-14T21:57:27,012 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,013 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:57:27,013 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,013 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
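A note on the reportForDuty entry above (21:57:27,000): the region server announces its hostname, port and startcode to the active master over the RegionServerStatusService interface, and the master acknowledges and registers it in the entries that follow. The sketch below is a minimal, hypothetical illustration of that announce-and-retry pattern in plain Java; MasterEndpoint, the backoff values and the toy master are invented for the example and are not HBase APIs.

import java.util.concurrent.TimeUnit;

public class ReportForDutySketch {
    // Hypothetical stand-in for the master-side registration call.
    interface MasterEndpoint {
        boolean register(String serverName, int port, long startcode) throws Exception;
    }

    // Announce this server to the master, retrying with a simple backoff
    // until the master acknowledges the registration.
    static void reportForDuty(MasterEndpoint master, String serverName, int port, long startcode)
            throws InterruptedException {
        long sleepMs = 100;   // illustrative backoff, not HBase's actual retry schedule
        while (true) {
            try {
                if (master.register(serverName, port, startcode)) {
                    System.out.println("Registered " + serverName + "," + port + "," + startcode);
                    return;
                }
            } catch (Exception e) {
                System.out.println("reportForDuty failed, retrying: " + e);
            }
            TimeUnit.MILLISECONDS.sleep(sleepMs);
            sleepMs = Math.min(sleepMs * 2, 5_000);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Toy master that accepts the second attempt, just to exercise the retry path.
        MasterEndpoint master = new MasterEndpoint() {
            int calls;
            public boolean register(String s, int p, long c) { return ++calls > 1; }
        };
        reportForDuty(master, "ef788a4cade8", 38561, 1731621445446L);
    }
}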
2024-11-14T21:57:27,014 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,014 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741831_1007 (size=1321) 2024-11-14T21:57:27,020 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T21:57:27,021 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7 2024-11-14T21:57:27,023 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7 2024-11-14T21:57:27,023 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44917 2024-11-14T21:57:27,023 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T21:57:27,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:57:27,027 DEBUG [RS:0;ef788a4cade8:38561 {}] zookeeper.ZKUtil(111): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,027 WARN [RS:0;ef788a4cade8:38561 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:57:27,027 INFO [RS:0;ef788a4cade8:38561 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:27,028 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741832_1008 (size=32) 2024-11-14T21:57:27,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef788a4cade8,38561,1731621445446] 2024-11-14T21:57:27,031 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T21:57:27,034 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T21:57:27,035 INFO [RS:0;ef788a4cade8:38561 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T21:57:27,035 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,035 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T21:57:27,036 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T21:57:27,036 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T21:57:27,036 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,036 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:57:27,037 DEBUG [RS:0;ef788a4cade8:38561 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
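The executor.ExecutorService entries above each report a corePoolSize and maxPoolSize for a named region-server pool. As a rough analogue only (this is not the org.apache.hadoop.hbase.executor.ExecutorService implementation), the plain java.util.concurrent sketch below builds one such bounded, named thread pool; the pool name and sizes are taken from the RS_OPEN_REGION line above purely for illustration.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPoolSketch {
    // Build a bounded pool whose worker threads carry the pool name,
    // mirroring the corePoolSize/maxPoolSize pairs reported in the log.
    static ThreadPoolExecutor newPool(String name, int coreSize, int maxSize) {
        AtomicInteger seq = new AtomicInteger();
        ThreadFactory tf = r -> new Thread(r, name + "-" + seq.getAndIncrement());
        return new ThreadPoolExecutor(coreSize, maxSize, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(), tf);
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
        openRegion.execute(() ->
                System.out.println("running on " + Thread.currentThread().getName()));
        openRegion.shutdown();
    }
}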
2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,038 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,38561,1731621445446-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:57:27,060 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T21:57:27,060 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,38561,1731621445446-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,061 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,061 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.Replication(171): ef788a4cade8,38561,1731621445446 started 2024-11-14T21:57:27,083 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:27,083 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1482): Serving as ef788a4cade8,38561,1731621445446, RpcServer on ef788a4cade8/172.17.0.2:38561, sessionid=0x100ac71eeff0001 2024-11-14T21:57:27,083 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T21:57:27,083 DEBUG [RS:0;ef788a4cade8:38561 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,083 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,38561,1731621445446' 2024-11-14T21:57:27,083 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T21:57:27,084 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T21:57:27,085 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T21:57:27,085 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T21:57:27,085 DEBUG [RS:0;ef788a4cade8:38561 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,085 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,38561,1731621445446' 2024-11-14T21:57:27,086 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T21:57:27,086 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T21:57:27,087 DEBUG [RS:0;ef788a4cade8:38561 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T21:57:27,087 INFO [RS:0;ef788a4cade8:38561 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T21:57:27,087 INFO [RS:0;ef788a4cade8:38561 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T21:57:27,188 INFO [RS:0;ef788a4cade8:38561 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:57:27,196 INFO [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C38561%2C1731621445446, suffix=, logDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446, archiveDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/oldWALs, maxLogs=32 2024-11-14T21:57:27,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:27,222 DEBUG [RS:0;ef788a4cade8:38561 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446/ef788a4cade8%2C38561%2C1731621445446.1731621447198, exclude list is [], retry=0 2024-11-14T21:57:27,228 DEBUG [HRegionServer-EventLoopGroup-9-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39179,DS-28990a7a-36c2-4424-a823-96ea0ab3631d,DISK] 2024-11-14T21:57:27,236 INFO [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446/ef788a4cade8%2C38561%2C1731621445446.1731621447198 2024-11-14T21:57:27,239 DEBUG [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213)] 2024-11-14T21:57:27,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:27,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:57:27,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:57:27,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:57:27,450 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:57:27,450 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:57:27,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:57:27,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:57:27,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:57:27,463 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:57:27,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740 2024-11-14T21:57:27,466 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740 2024-11-14T21:57:27,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:57:27,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:57:27,469 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:57:27,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:57:27,475 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:27,477 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:27,479 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60121049, jitterRate=-0.10412655770778656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:27,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731621447430Initializing all the Stores at 1731621447435 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447435Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447435Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621447435Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447435Cleaning up temporary data from old regions at 1731621447468 (+33 ms)Region opened successfully at 1731621447481 (+13 ms) 2024-11-14T21:57:27,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:57:27,482 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:57:27,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:57:27,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:57:27,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:57:27,511 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:27,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621447482Disabling compacts and flushes for region at 1731621447482Disabling writes for close at 1731621447482Writing region close event to WAL at 1731621447511 (+29 ms)Closed at 1731621447511 2024-11-14T21:57:27,518 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:27,519 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T21:57:27,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T21:57:27,522 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:57:27,525 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T21:57:27,681 DEBUG [ef788a4cade8:63118 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T21:57:27,681 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef788a4cade8,38561,1731621445446 2024-11-14T21:57:27,686 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,38561,1731621445446, state=OPENING 2024-11-14T21:57:27,689 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T21:57:27,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:27,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:27,697 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:27,697 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:27,698 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:57:27,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,38561,1731621445446}] 2024-11-14T21:57:27,863 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T21:57:27,876 INFO [HRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47759, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T21:57:27,883 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T21:57:27,883 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:27,884 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-14T21:57:27,886 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C38561%2C1731621445446.meta, suffix=.meta, logDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446, archiveDir=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/oldWALs, maxLogs=32 2024-11-14T21:57:27,911 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446/ef788a4cade8%2C38561%2C1731621445446.meta.1731621447888.meta, exclude list is [], retry=0 2024-11-14T21:57:27,916 DEBUG [HRegionServer-EventLoopGroup-9-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39179,DS-28990a7a-36c2-4424-a823-96ea0ab3631d,DISK] 2024-11-14T21:57:27,943 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/WALs/ef788a4cade8,38561,1731621445446/ef788a4cade8%2C38561%2C1731621445446.meta.1731621447888.meta 2024-11-14T21:57:27,954 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42213:42213)] 2024-11-14T21:57:27,955 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:57:27,955 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T21:57:27,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T21:57:27,956 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T21:57:27,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T21:57:27,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:27,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T21:57:27,956 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T21:57:27,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:57:27,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:57:27,971 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,974 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:57:27,975 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:27,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:57:27,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:57:27,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:57:27,982 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,983 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:57:27,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:57:27,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:27,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:27,990 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:57:27,992 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740 2024-11-14T21:57:27,994 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740 2024-11-14T21:57:28,001 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:57:28,001 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:57:28,002 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
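A note on the repeated Close-WAL-Writer-0 WARN earlier in this log (util.RecoverLeaseFSUtils(258)): the stack trace shows isFileClosed being invoked reflectively via Method.invoke, so the failure surfaces as java.lang.reflect.InvocationTargetException with a null message, and the real reason ("Filesystem closed") appears only in the cause chain. The JDK-only sketch below reproduces that wrapping; the class and method names are invented for the example and are not the HBase or HDFS code paths themselves.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapSketch {
    // Stand-in for a filesystem call that fails once the client is closed.
    public static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectiveWrapSketch.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The wrapper itself carries no message; the IOException is the cause,
            // which matches the shape of the stack trace logged above.
            System.out.println("wrapper message = " + e.getMessage());
            System.out.println("cause = " + e.getCause());
        }
    }
}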
2024-11-14T21:57:28,017 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:57:28,023 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74865270, jitterRate=0.11557945609092712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:28,023 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T21:57:28,026 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731621447957Writing region info on filesystem at 1731621447957Initializing all the Stores at 1731621447959 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447959Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447962 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621447962Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621447962Cleaning up temporary data from old regions at 1731621448001 (+39 ms)Running coprocessor post-open hooks at 1731621448023 (+22 ms)Region opened successfully at 1731621448026 (+3 ms) 2024-11-14T21:57:28,029 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731621447862 2024-11-14T21:57:28,034 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T21:57:28,034 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T21:57:28,035 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=ef788a4cade8,38561,1731621445446 2024-11-14T21:57:28,037 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,38561,1731621445446, state=OPEN 2024-11-14T21:57:28,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:57:28,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:57:28,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:28,038 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef788a4cade8,38561,1731621445446 2024-11-14T21:57:28,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:28,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T21:57:28,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,38561,1731621445446 in 340 msec 2024-11-14T21:57:28,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T21:57:28,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 526 msec 2024-11-14T21:57:28,052 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:28,052 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T21:57:28,054 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T21:57:28,054 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef788a4cade8,38561,1731621445446, seqNum=-1] 2024-11-14T21:57:28,055 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T21:57:28,057 INFO [HRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59783, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T21:57:28,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0850 sec 2024-11-14T21:57:28,081 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731621448081, completionTime=-1 2024-11-14T21:57:28,082 INFO 
[master/ef788a4cade8:63118:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T21:57:28,082 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T21:57:28,086 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T21:57:28,086 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731621508086 2024-11-14T21:57:28,086 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731621568086 2024-11-14T21:57:28,086 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 4 msec 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef788a4cade8:63118, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,087 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,100 DEBUG [master/ef788a4cade8:63118.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T21:57:28,102 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.604sec 2024-11-14T21:57:28,102 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T21:57:28,102 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T21:57:28,103 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T21:57:28,103 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T21:57:28,103 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T21:57:28,103 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:57:28,103 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T21:57:28,115 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T21:57:28,115 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T21:57:28,115 INFO [master/ef788a4cade8:63118:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,63118,1731621445372-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:28,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:28,476 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14 21:57:28,478 Time-limited test DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 21:57:28,482 Time-limited test DEBUG Starting WriterManager java.io.StringWriter@1a4860df.false 2024-11-14T21:57:28,486 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:28,486 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:57:28,487 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:28,488 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34815 2024-11-14T21:57:28,489 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34815 connecting to ZooKeeper ensemble=127.0.0.1:56659 2024-11-14T21:57:28,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:28,492 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:28,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348150x0, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:28,497 DEBUG [pool-420-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T21:57:28,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34815-0x100ac71eeff0002, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T21:57:28,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34815-0x100ac71eeff0002 connected 2024-11-14T21:57:28,498 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-11-14T21:57:28,498 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:57:28,499 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34815-0x100ac71eeff0002, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:57:28,501 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34815-0x100ac71eeff0002, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:28,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34815 2024-11-14T21:57:28,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34815 2024-11-14T21:57:28,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34815 2024-11-14T21:57:28,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34815 2024-11-14T21:57:28,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34815 2024-11-14T21:57:28,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/draining 2024-11-14T21:57:28,514 INFO [Time-limited test {}] master.ServerManager(767): Server ef788a4cade8,34815,1731621448485 added to draining server list. 2024-11-14 21:57:28,516 RS:1;ef788a4cade8:34815 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:28,516 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on existing znode=/hbase/draining/ef788a4cade8,34815,1731621448485 2024-11-14T21:57:28,515 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(746): ClusterId : cf540142-a243-4988-89db-1fa5717cf8dd 2024-11-14T21:57:28,516 DEBUG [RS:1;ef788a4cade8:34815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:57:28,516 WARN [zk-event-processor-pool-0 {}] master.ServerManager(762): Server ef788a4cade8,34815,1731621448485 is already in the draining server list. Ignoring request to add it again. 
2024-11-14T21:57:28,517 INFO [zk-event-processor-pool-0 {}] master.DrainingServerTracker(92): Draining RS node created, adding to list [ef788a4cade8,34815,1731621448485] 2024-11-14T21:57:28,519 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [90,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T21:57:28,519 DEBUG [RS:1;ef788a4cade8:34815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:57:28,519 DEBUG [RS:1;ef788a4cade8:34815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:57:28,522 DEBUG [RS:1;ef788a4cade8:34815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:57:28,522 DEBUG [RS:1;ef788a4cade8:34815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a49fde4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:28,542 DEBUG [RS:1;ef788a4cade8:34815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef788a4cade8:34815 2024-11-14T21:57:28,543 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:57:28,543 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14 21:57:28,544 RS:1;ef788a4cade8:34815 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:28,543 DEBUG [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14 21:57:28,552 RS:1;ef788a4cade8:34815 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:28,552 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,63118,1731621445372 with port=34815, startcode=1731621448485 2024-11-14T21:57:28,553 DEBUG [RS:1;ef788a4cade8:34815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:57:28,562 INFO [HMaster-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:57:28,563 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,34815,1731621448485 2024-11-14T21:57:28,563 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] master.ServerManager(374): Rejecting RegionServer ef788a4cade8,34815,1731621448485 from reporting for duty because Master is configured to reject decommissioned hosts and this host was marked as such in the past. 2024-11-14T21:57:28,564 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) ~[classes/:?] at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:28,564 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=63118 {}] ipc.CallRunner(138): callId: 0 service: RegionServerStatusService methodName: RegionServerStartup size: 52 connection: 172.17.0.2:57301 deadline: 1731621458562, exception=org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts 2024-11-14 21:57:28,565 RS:1;ef788a4cade8:34815 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:28,565 ERROR [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2678): Master rejected startup because the host is considered decommissioned org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:403) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:381) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportForDuty(HRegionServer.java:2672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.lambda$run$1(HRegionServer.java:838) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:833) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] ... 
1 more 2024-11-14T21:57:28,567 ERROR [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2440): ***** ABORTING region server ef788a4cade8,34815,1731621448485: Unhandled: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ***** org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:403) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:381) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportForDuty(HRegionServer.java:2672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.lambda$run$1(HRegionServer.java:838) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:833) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] ... 
1 more 2024-11-14T21:57:28,568 ERROR [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-14T21:57:28,573 DEBUG [RS:1;ef788a4cade8:34815 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-14T21:57:28,576 DEBUG [RS:1;ef788a4cade8:34815 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-14T21:57:28,576 DEBUG [RS:1;ef788a4cade8:34815 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-14T21:57:28,576 DEBUG [RS:1;ef788a4cade8:34815 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-14T21:57:28,577 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 729456848 }, "NonHeapMemoryUsage": { "committed": 152502272, "init": 7667712, "max": -1, "used": 150187440 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-14T21:57:28,582 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=63118 {}] master.MasterRpcServices(700): ef788a4cade8,34815,1731621448485 reported a fatal error: ***** ABORTING region server ef788a4cade8,34815,1731621448485: Unhandled: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ***** Cause: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at 
java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:403) at org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:381) at org.apache.hadoop.hbase.regionserver.HRegionServer.reportForDuty(HRegionServer.java:2672) at org.apache.hadoop.hbase.regionserver.HRegionServer.lambda$run$1(HRegionServer.java:838) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:833) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException): org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at 
org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ... 
1 more 2024-11-14T21:57:28,592 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,34815,1731621448485' ***** 2024-11-14T21:57:28,592 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(2210): STOPPED: Unhandled: org.apache.hadoop.hbase.ipc.DecommissionedHostRejectedException: Host ef788a4cade8 exists in the list of decommissioned servers and Master is configured to reject decommissioned hosts at org.apache.hadoop.hbase.master.ServerManager.checkRejectableDecommissionedStatus(ServerManager.java:378) at org.apache.hadoop.hbase.master.ServerManager.regionServerStartup(ServerManager.java:267) at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:679) at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(956): aborting server ef788a4cade8,34815,1731621448485 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef788a4cade8:34815. 2024-11-14T21:57:28,593 DEBUG [RS:1;ef788a4cade8:34815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:28,593 DEBUG [RS:1;ef788a4cade8:34815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,34815,1731621448485; all regions closed. 
2024-11-14T21:57:28,593 DEBUG [RS:1;ef788a4cade8:34815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [] on shutdown 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:28,593 INFO [RS:1;ef788a4cade8:34815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34815 2024-11-14T21:57:28,596 DEBUG [RS:1;ef788a4cade8:34815 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/rs/ef788a4cade8,34815,1731621448485 already deleted, retry=false 2024-11-14T21:57:28,596 INFO [RS:1;ef788a4cade8:34815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:28,624 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T21:57:28,624 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=601399315, stopped=false 2024-11-14T21:57:28,624 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef788a4cade8,63118,1731621445372 2024-11-14T21:57:28,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:28,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:28,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:28,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:28,638 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:57:28,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:28,638 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T21:57:28,638 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:91) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:28,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:28,639 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:28,639 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,38561,1731621445446' ***** 2024-11-14T21:57:28,639 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T21:57:28,642 INFO [RS:0;ef788a4cade8:38561 {}] 
regionserver.HeapMemoryManager(220): Stopping 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T21:57:28,643 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,38561,1731621445446 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef788a4cade8:38561. 2024-11-14T21:57:28,643 DEBUG [RS:0;ef788a4cade8:38561 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:28,643 DEBUG [RS:0;ef788a4cade8:38561 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T21:57:28,643 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T21:57:28,644 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T21:57:28,644 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T21:57:28,644 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:28,644 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:57:28,644 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:57:28,644 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:57:28,644 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T21:57:28,644 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:57:28,644 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T21:57:28,647 INFO [regionserver/ef788a4cade8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:57:28,666 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/.tmp/ns/a9771aa68d45465a8a3e2afb566cd02c is 43, key is default/ns:d/1731621448059/Put/seqid=0 2024-11-14T21:57:28,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741835_1011 (size=5153) 2024-11-14T21:57:28,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34815-0x100ac71eeff0002, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:28,700 INFO [RS:1;ef788a4cade8:34815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:28,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34815-0x100ac71eeff0002, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:28,700 INFO [RS:1;ef788a4cade8:34815 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,34815,1731621448485; zookeeper connection closed. 
2024-11-14T21:57:28,844 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:29,044 DEBUG [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:29,067 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T21:57:29,067 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T21:57:29,076 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/.tmp/ns/a9771aa68d45465a8a3e2afb566cd02c 2024-11-14T21:57:29,090 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/.tmp/ns/a9771aa68d45465a8a3e2afb566cd02c as hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/ns/a9771aa68d45465a8a3e2afb566cd02c 2024-11-14T21:57:29,104 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/ns/a9771aa68d45465a8a3e2afb566cd02c, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T21:57:29,107 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 462ms, sequenceid=6, compaction requested=false 2024-11-14T21:57:29,107 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T21:57:29,115 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T21:57:29,116 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T21:57:29,116 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:29,117 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621448644Running coprocessor pre-close hooks at 1731621448644Disabling compacts and flushes for region at 1731621448644Disabling writes for close at 1731621448644Obtaining lock to block concurrent updates at 1731621448644Preparing flush snapshotting stores in 1588230740 at 1731621448644Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731621448645 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 
1731621448647 (+2 ms)Flushing 1588230740/ns: creating writer at 1731621448647Flushing 1588230740/ns: appending metadata at 1731621448665 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731621448665Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@714e74bc: reopening flushed file at 1731621449089 (+424 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 462ms, sequenceid=6, compaction requested=false at 1731621449107 (+18 ms)Writing region close event to WAL at 1731621449109 (+2 ms)Running coprocessor post-close hooks at 1731621449116 (+7 ms)Closed at 1731621449116 2024-11-14T21:57:29,117 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:29,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:29,245 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,38561,1731621445446; all regions closed. 
2024-11-14T21:57:29,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741834_1010 (size=1152)
2024-11-14T21:57:29,257 DEBUG [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/oldWALs
2024-11-14T21:57:29,258 INFO [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C38561%2C1731621445446.meta:.meta(num 1731621447888)
2024-11-14T21:57:29,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741833_1009 (size=93)
2024-11-14T21:57:29,277 DEBUG [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/oldWALs
2024-11-14T21:57:29,277 INFO [RS:0;ef788a4cade8:38561 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C38561%2C1731621445446:(num 1731621447198)
2024-11-14T21:57:29,277 DEBUG [RS:0;ef788a4cade8:38561 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T21:57:29,277 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.LeaseManager(133): Closed leases
2024-11-14T21:57:29,277 INFO [RS:0;ef788a4cade8:38561 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-14T21:57:29,277 INFO [RS:0;ef788a4cade8:38561 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-14T21:57:29,278 INFO [RS:0;ef788a4cade8:38561 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-14T21:57:29,278 INFO [regionserver/ef788a4cade8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-14T21:57:29,278 INFO [RS:0;ef788a4cade8:38561 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38561 2024-11-14T21:57:29,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:57:29,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef788a4cade8,38561,1731621445446 2024-11-14T21:57:29,281 INFO [RS:0;ef788a4cade8:38561 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:29,281 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef788a4cade8,38561,1731621445446] 2024-11-14T21:57:29,284 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef788a4cade8,38561,1731621445446 already deleted, retry=false 2024-11-14T21:57:29,285 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef788a4cade8,38561,1731621445446 expired; onlineServers=0 2024-11-14T21:57:29,285 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,63118,1731621445372' ***** 2024-11-14T21:57:29,285 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T21:57:29,285 INFO [M:0;ef788a4cade8:63118 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:29,285 INFO [M:0;ef788a4cade8:63118 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:29,285 DEBUG [M:0;ef788a4cade8:63118 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T21:57:29,285 DEBUG [M:0;ef788a4cade8:63118 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T21:57:29,285 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.large.0-1731621447011 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.large.0-1731621447011,5,FailOnTimeoutGroup] 2024-11-14T21:57:29,285 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T21:57:29,285 INFO [M:0;ef788a4cade8:63118 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:63118 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T21:57:29,285 INFO [M:0;ef788a4cade8:63118 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:29,285 DEBUG [master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.small.0-1731621447011 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:63118:becomeActiveMaster-HFileCleaner.small.0-1731621447011,5,FailOnTimeoutGroup] 2024-11-14T21:57:29,286 DEBUG [M:0;ef788a4cade8:63118 {}] master.HMaster(1795): Stopping service threads 2024-11-14T21:57:29,286 INFO [M:0;ef788a4cade8:63118 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T21:57:29,286 INFO [M:0;ef788a4cade8:63118 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:57:29,286 INFO [M:0;ef788a4cade8:63118 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T21:57:29,286 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T21:57:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:29,288 DEBUG [M:0;ef788a4cade8:63118 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-14T21:57:29,288 DEBUG [M:0;ef788a4cade8:63118 {}] master.ActiveMasterManager(353): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-14T21:57:29,289 INFO [M:0;ef788a4cade8:63118 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/.lastflushedseqids 2024-11-14T21:57:29,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741836_1012 (size=99) 2024-11-14T21:57:29,301 INFO [M:0;ef788a4cade8:63118 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:57:29,301 INFO [M:0;ef788a4cade8:63118 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:57:29,301 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:29,301 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:29,301 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:29,301 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-14T21:57:29,301 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:29,302 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.53 KB heapSize=11.13 KB 2024-11-14T21:57:29,329 DEBUG [M:0;ef788a4cade8:63118 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53d7ba8c081b4a93a4d804e14360d3b9 is 82, key is hbase:meta,,1/info:regioninfo/1731621448034/Put/seqid=0 2024-11-14T21:57:29,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741837_1013 (size=5672) 2024-11-14T21:57:29,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:29,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38561-0x100ac71eeff0001, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:29,384 INFO [RS:0;ef788a4cade8:38561 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:29,384 INFO [RS:0;ef788a4cade8:38561 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,38561,1731621445446; zookeeper connection closed. 2024-11-14T21:57:29,385 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T21:57:29,736 INFO [M:0;ef788a4cade8:63118 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53d7ba8c081b4a93a4d804e14360d3b9 2024-11-14T21:57:29,766 DEBUG [M:0;ef788a4cade8:63118 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9667b8022b5044a9b78aee54acf2d769 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731621448078/Put/seqid=0 2024-11-14T21:57:29,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741838_1014 (size=5275) 2024-11-14T21:57:30,173 INFO [M:0;ef788a4cade8:63118 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.98 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9667b8022b5044a9b78aee54acf2d769 2024-11-14T21:57:30,202 DEBUG [M:0;ef788a4cade8:63118 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/36170aee789145ccbdd29d13cbdc5d02 is 69, key is ef788a4cade8,38561,1731621445446/rs:state/1731621447014/Put/seqid=0 2024-11-14T21:57:30,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39179 is added to blk_1073741839_1015 (size=5156) 2024-11-14T21:57:30,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:30,209 INFO [M:0;ef788a4cade8:63118 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/36170aee789145ccbdd29d13cbdc5d02 2024-11-14T21:57:30,219 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/53d7ba8c081b4a93a4d804e14360d3b9 as hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/53d7ba8c081b4a93a4d804e14360d3b9 2024-11-14T21:57:30,230 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/53d7ba8c081b4a93a4d804e14360d3b9, entries=8, sequenceid=28, filesize=5.5 K 2024-11-14T21:57:30,231 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9667b8022b5044a9b78aee54acf2d769 as hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9667b8022b5044a9b78aee54acf2d769 2024-11-14T21:57:30,241 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9667b8022b5044a9b78aee54acf2d769, entries=3, sequenceid=28, filesize=5.2 K 2024-11-14T21:57:30,243 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/36170aee789145ccbdd29d13cbdc5d02 as hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/36170aee789145ccbdd29d13cbdc5d02 2024-11-14T21:57:30,256 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44917/user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/36170aee789145ccbdd29d13cbdc5d02, entries=1, sequenceid=28, filesize=5.0 K 2024-11-14T21:57:30,257 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 956ms, sequenceid=28, compaction requested=false 2024-11-14T21:57:30,260 INFO [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:57:30,261 DEBUG [M:0;ef788a4cade8:63118 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621449301Disabling compacts and flushes for region at 1731621449301Disabling writes for close at 1731621449301Obtaining lock to block concurrent updates at 1731621449302 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621449302Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7712, getHeapSize=11328, getOffHeapSize=0, getCellsCount=35 at 1731621449302Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731621449304 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731621449304Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731621449328 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731621449328Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731621449746 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731621449765 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731621449765Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731621450182 (+417 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731621450202 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731621450202Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@149053ed: reopening flushed file at 1731621450217 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3471fe5d: reopening flushed file at 1731621450230 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@221dffdb: reopening flushed file at 1731621450241 (+11 ms)Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 956ms, sequenceid=28, compaction requested=false at 1731621450257 (+16 ms)Writing region close event to WAL at 1731621450260 (+3 ms)Closed at 1731621450260 2024-11-14T21:57:30,265 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/d1d74c93-0571-bf41-df07-26c5780742b7/MasterData/WALs/ef788a4cade8,63118,1731621445372/ef788a4cade8%2C63118%2C1731621445372.1731621446835 not finished, retry = 0 2024-11-14T21:57:30,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39179 is added to blk_1073741830_1006 (size=10075) 2024-11-14T21:57:30,367 INFO [M:0;ef788a4cade8:63118 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T21:57:30,367 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T21:57:30,367 INFO [M:0;ef788a4cade8:63118 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:63118 2024-11-14T21:57:30,367 INFO [M:0;ef788a4cade8:63118 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:30,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:30,469 INFO [M:0;ef788a4cade8:63118 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:30,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:63118-0x100ac71eeff0000, quorum=127.0.0.1:56659, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:30,472 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T21:57:30,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@477b7dda{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:30,481 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75cff82f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:30,481 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:30,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e7a99c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:30,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b15505e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:30,485 WARN [BP-912554017-172.17.0.2-1731621444390 heartbeating to localhost/127.0.0.1:44917 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T21:57:30,485 WARN [BP-912554017-172.17.0.2-1731621444390 heartbeating to localhost/127.0.0.1:44917 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-912554017-172.17.0.2-1731621444390 (Datanode Uuid 12604faa-dd5a-4367-b93e-4a85fb835e72) service to localhost/127.0.0.1:44917 2024-11-14T21:57:30,485 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/data/data1/current/BP-912554017-172.17.0.2-1731621444390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:30,486 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/cluster_513b0af5-c531-7cf9-7aeb-97960bb26752/data/data2/current/BP-912554017-172.17.0.2-1731621444390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:30,486 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered 
interrupt and exit. 2024-11-14T21:57:30,486 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T21:57:30,486 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T21:57:30,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c1c27ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:30,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35919168{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:30,499 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:30,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d39a7cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:30,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6315fee5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:30,514 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.TestRegionServerReportForDuty#testReportForDutyGetsRejectedByMasterWhenConfiguredToRejectDecommissionedHosts Thread=85 (was 63) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:44917 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-7-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-7-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-7-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44917 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44917 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:44917 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HRegionServer-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44917 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=396 (was 356) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=340 (was 339) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2652 (was 3063) 2024-11-14T21:57:30,534 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.TestRegionServerReportForDuty#testReportForDutyWithEnvironmentEdge Thread=85, OpenFileDescriptor=396, MaxFileDescriptor=1048576, SystemLoadAverage=340, ProcessCount=11, AvailableMemoryMB=2652 2024-11-14T21:57:30,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir so I do NOT create it in target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d 2024-11-14T21:57:30,552 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.log.dir Erasing configuration value by system value. 2024-11-14T21:57:30,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.tmp.dir so I do NOT create it in target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d 2024-11-14T21:57:30,552 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7dd3d42-e19b-d5bb-c6c8-5553e79df628/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-14T21:57:30,552 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d 2024-11-14T21:57:30,552 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd, deleteOnExit=true 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/test.cache.data in system properties and HBase conf 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir in system properties and HBase conf 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T21:57:30,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T21:57:30,553 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T21:57:30,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/nfs.dump.dir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/java.io.tmpdir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T21:57:30,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T21:57:30,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:30,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:30,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:30,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:30,673 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:57:30,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:30,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aae06e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:30,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6422d591{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:30,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11893ef0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/java.io.tmpdir/jetty-localhost-45721-hadoop-hdfs-3_4_1-tests_jar-_-any-14200728616666163459/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:30,800 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5152a519{HTTP/1.1, (http/1.1)}{localhost:45721} 2024-11-14T21:57:30,800 INFO [Time-limited test {}] server.Server(415): Started @338241ms 2024-11-14T21:57:30,918 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:30,922 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:30,923 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:30,923 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:30,924 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T21:57:30,924 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f496114{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:30,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d67faf4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:31,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19b7eb5d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/java.io.tmpdir/jetty-localhost-45971-hadoop-hdfs-3_4_1-tests_jar-_-any-3307407240034705128/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:31,067 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@251173d3{HTTP/1.1, (http/1.1)}{localhost:45971} 2024-11-14T21:57:31,067 INFO [Time-limited test {}] server.Server(415): Started @338508ms 2024-11-14T21:57:31,069 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T21:57:31,195 WARN [Thread-517 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/data/data1/current/BP-103333673-172.17.0.2-1731621450597/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:31,195 WARN [Thread-518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/data/data2/current/BP-103333673-172.17.0.2-1731621450597/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:31,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:31,214 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T21:57:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46b97c2bbabd5eca with lease ID 0xa96657f030e1e5e: Processing first storage report for DS-61162a1d-47a5-4e3d-926a-61222a812f1f from datanode DatanodeRegistration(127.0.0.1:43815, datanodeUuid=8fb87c64-62a1-444c-a361-1ba6d2b56f6e, infoPort=33213, infoSecurePort=0, ipcPort=43651, storageInfo=lv=-57;cid=testClusterID;nsid=1623745977;c=1731621450597) 2024-11-14T21:57:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46b97c2bbabd5eca with lease ID 0xa96657f030e1e5e: from storage DS-61162a1d-47a5-4e3d-926a-61222a812f1f node DatanodeRegistration(127.0.0.1:43815, datanodeUuid=8fb87c64-62a1-444c-a361-1ba6d2b56f6e, infoPort=33213, infoSecurePort=0, ipcPort=43651, storageInfo=lv=-57;cid=testClusterID;nsid=1623745977;c=1731621450597), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:57:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x46b97c2bbabd5eca with lease ID 0xa96657f030e1e5e: Processing first storage report for DS-bbe5ddf4-7a5a-45d1-8ed8-b720b88187db from datanode DatanodeRegistration(127.0.0.1:43815, datanodeUuid=8fb87c64-62a1-444c-a361-1ba6d2b56f6e, infoPort=33213, infoSecurePort=0, ipcPort=43651, storageInfo=lv=-57;cid=testClusterID;nsid=1623745977;c=1731621450597) 2024-11-14T21:57:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x46b97c2bbabd5eca with lease ID 0xa96657f030e1e5e: from storage DS-bbe5ddf4-7a5a-45d1-8ed8-b720b88187db node DatanodeRegistration(127.0.0.1:43815, datanodeUuid=8fb87c64-62a1-444c-a361-1ba6d2b56f6e, infoPort=33213, infoSecurePort=0, ipcPort=43651, storageInfo=lv=-57;cid=testClusterID;nsid=1623745977;c=1731621450597), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T21:57:31,234 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d 2024-11-14T21:57:31,237 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/zookeeper_0, clientPort=59788, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T21:57:31,238 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59788 2024-11-14T21:57:31,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,240 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741825_1001 (size=7) 2024-11-14T21:57:31,652 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba with version=8 2024-11-14T21:57:31,656 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:60537 server-side Connection retries=45 2024-11-14T21:57:31,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,656 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:31,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:31,657 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:57:31,657 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:31,658 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:60537 2024-11-14T21:57:31,659 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:60537 connecting to ZooKeeper ensemble=127.0.0.1:59788 2024-11-14T21:57:31,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:605370x0, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:31,672 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:60537-0x100ac7206040000 connected 2024-11-14T21:57:31,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,696 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:31,698 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba, hbase.cluster.distributed=false 2024-11-14T21:57:31,700 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:31,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=60537 2024-11-14T21:57:31,701 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=60537 2024-11-14T21:57:31,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=60537 2024-11-14T21:57:31,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=60537 2024-11-14T21:57:31,702 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=60537 2024-11-14T21:57:31,704 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:57:31,704 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:31,705 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37019 2024-11-14T21:57:31,706 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37019 connecting to ZooKeeper ensemble=127.0.0.1:59788 2024-11-14T21:57:31,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,708 INFO [Time-limited 
test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:31,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370190x0, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:31,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:370190x0, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:31,713 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37019-0x100ac7206040001 connected 2024-11-14T21:57:31,713 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:57:31,714 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:57:31,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:57:31,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:31,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T21:57:31,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37019 2024-11-14T21:57:31,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37019 2024-11-14T21:57:31,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T21:57:31,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37019 2024-11-14T21:57:31,720 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(337): Starting master: ef788a4cade8,60537,1731621451654 2024-11-14T21:57:31,734 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef788a4cade8:60537 2024-11-14T21:57:31,734 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,60537,1731621451654 2024-11-14T21:57:31,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:31,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:31,737 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/ef788a4cade8,60537,1731621451654 2024-11-14T21:57:31,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:31,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:31,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:31,740 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:57:31,741 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,60537,1731621451654 from backup master directory 2024-11-14T21:57:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,60537,1731621451654 2024-11-14T21:57:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:31,744 WARN [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
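The ZKWatcher/ZKUtil entries above repeatedly "Set watcher on znode that does not yet exist" (/hbase/running, /hbase/master, /hbase/acl) and then react to NodeCreated/NodeDeleted events. Below is a minimal sketch of the underlying ZooKeeper pattern using the raw org.apache.zookeeper client; it is not HBase's ZKUtil/ZKWatcher code, and the connect string and path are illustrative only.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Registers a watch on a znode that may not exist yet; the watch fires once
// (NodeCreated/NodeDeleted/NodeDataChanged) and must then be re-registered.
public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59788", 30000, event -> {
      // Session-level events (SyncConnected etc.) arrive here with path == null,
      // matching the "type=None, state=SyncConnected, path=null" entries in the log.
    });

    Watcher nodeWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();
      }
    };

    // exists() returns null when the znode is absent but still leaves the watch
    // armed: the "Set watcher on znode that does not yet exist" case in the log.
    Stat stat = zk.exists("/hbase/running", nodeWatcher);
    if (stat == null) {
      created.await();   // unblocks once another client creates /hbase/running
    }
    zk.close();
  }
}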
2024-11-14T21:57:31,744 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,60537,1731621451654 2024-11-14T21:57:31,749 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/hbase.id] with ID: 073d8d5b-f682-415b-abd1-5a095791c050 2024-11-14T21:57:31,749 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/.tmp/hbase.id 2024-11-14T21:57:31,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741826_1002 (size=42) 2024-11-14T21:57:32,158 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/.tmp/hbase.id]:[hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/hbase.id] 2024-11-14T21:57:32,174 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:32,174 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:57:32,176 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-14T21:57:32,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:32,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:32,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741827_1003 (size=196) 2024-11-14T21:57:32,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:32,220 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
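The recurring WARN above comes from a reflective call (RecoverLeaseFSUtils invoking isFileClosed) whose target throws "Filesystem closed"; reflection wraps that in an InvocationTargetException, which is why the log prints "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed". A self-contained sketch of that wrap/unwrap pattern follows; FakeFileSystem is an invented stand-in for illustration, not a Hadoop type.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallSketch {

  // Stand-in for a filesystem client whose method fails once it is closed.
  static class FakeFileSystem {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    FakeFileSystem fs = new FakeFileSystem();
    Method m = FakeFileSystem.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(fs, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // Reflection wraps the target's exception; getMessage() on the wrapper is
      // typically null, hence "InvocationTargetException: null" in the log.
      Throwable cause = e.getCause();   // the real IOException
      if (cause instanceof IOException) {
        System.out.println("underlying failure: " + cause.getMessage());
      }
    }
  }
}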
2024-11-14T21:57:32,586 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:57:32,588 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:57:32,588 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:32,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741828_1004 (size=1189) 2024-11-14T21:57:32,720 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:32,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:32,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:32,997 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store 2024-11-14T21:57:33,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741829_1005 (size=34) 2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:33,009 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:33,009 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
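The 'master:store' descriptor logged above lists four column families (info, proc, rs, state) with per-family versions, encodings, bloom filters and block sizes. As a hedged illustration, the 'info' family's logged settings could be expressed with the HBase 2/3 client builder API roughly as below; the table name is made up for the example and this is not the internal MasterRegion code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor exampleDescriptor() {
    // Mirrors the logged 'info' family: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))   // illustrative name only
        .setColumnFamily(info)
        .build();
  }
}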
2024-11-14T21:57:33,009 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621451736Disabling compacts and flushes for region at 1731621451737 (+1 ms)Disabling writes for close at 1731621451740 (+3 ms)Writing region close event to WAL at 1731621451741 (+1 ms)Closed at 1731621451743 (+2 ms) 2024-11-14T21:57:33,010 WARN [master/ef788a4cade8:60537:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/.initializing 2024-11-14T21:57:33,010 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/WALs/ef788a4cade8,60537,1731621451654 2024-11-14T21:57:33,011 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:57:33,014 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C60537%2C1731621451654, suffix=, logDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/WALs/ef788a4cade8,60537,1731621451654, archiveDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/oldWALs, maxLogs=10 2024-11-14T21:57:33,033 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/WALs/ef788a4cade8,60537,1731621451654/ef788a4cade8%2C60537%2C1731621451654.1731621451746, exclude list is [], retry=0 2024-11-14T21:57:33,046 DEBUG [HRegionServer-EventLoopGroup-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43815,DS-61162a1d-47a5-4e3d-926a-61222a812f1f,DISK] 2024-11-14T21:57:33,049 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/WALs/ef788a4cade8,60537,1731621451654/ef788a4cade8%2C60537%2C1731621451654.1731621451746 2024-11-14T21:57:33,050 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33213:33213)] 2024-11-14T21:57:33,051 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:57:33,051 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:33,051 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,051 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 
1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:57:33,055 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:33,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:33,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:57:33,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:33,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:33,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:57:33,061 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:33,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:33,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:57:33,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:33,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:33,065 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,065 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,066 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,067 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,067 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,068 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:57:33,069 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:33,072 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:33,072 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60170632, jitterRate=-0.10338771343231201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:33,073 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621451759Initializing all the Stores at 1731621451762 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621451763 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621451764 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621451765 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621451766 (+1 ms)Cleaning up temporary data from old regions at 1731621451767 (+1 ms)Region opened successfully at 1731621451770 (+3 ms) 2024-11-14T21:57:33,073 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:57:33,078 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c9d8fb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:33,080 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T21:57:33,080 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:57:33,080 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:57:33,080 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:57:33,081 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T21:57:33,081 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T21:57:33,081 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=0 2024-11-14T21:57:33,085 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
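The FlushLargeStoresPolicy entry above derives its per-family threshold because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the region memstore flush size (flushSize=134217728, i.e. 128 MB) is divided by the number of column families (four here: info, proc, rs, state), giving the logged flushSizeLowerBound=33554432 (32 MB). A tiny sketch of that arithmetic, in plain Java with no HBase types:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L; // 128 MB, as logged for master:store
    int columnFamilies = 4;                // info, proc, rs, state

    long perFamilyLowerBound = memstoreFlushSize / columnFamilies;
    System.out.println(perFamilyLowerBound);                  // 33554432
    System.out.println((perFamilyLowerBound >> 20) + " MB");  // 32 MB
  }
}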
2024-11-14T21:57:33,086 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:57:33,088 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:57:33,088 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:57:33,089 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:57:33,090 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:57:33,091 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:57:33,092 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:57:33,093 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:57:33,094 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:57:33,097 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:57:33,100 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:57:33,102 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:57:33,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:33,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:33,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,106 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,60537,1731621451654, sessionid=0x100ac7206040000, setting cluster-up flag (Was=false) 2024-11-14T21:57:33,107 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(746): ClusterId : 073d8d5b-f682-415b-abd1-5a095791c050 2024-11-14T21:57:33,107 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:57:33,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,121 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:57:33,124 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:57:33,124 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:57:33,125 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,60537,1731621451654 2024-11-14T21:57:33,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:33,135 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:57:33,135 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:57:33,135 DEBUG [RS:0;ef788a4cade8:37019 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@355965f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:33,138 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,60537,1731621451654 2024-11-14T21:57:33,143 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 
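The ZKProcedureUtil entries above ("Clearing all znodes /hbase/flush-table-proc/acquired, .../reached, .../abort") reset the procedure coordination nodes by wiping what sits under them, and the nearby "Node ... already deleted, retry=false" lines show that a missing node is not treated as an error. Below is a minimal, non-recursive sketch of clearing one such node's direct children with the raw ZooKeeper client; paths and connect string are illustrative, and this is not HBase's ZKProcedureUtil itself.

import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ClearChildrenSketch {
  // Deletes every direct child of parent, tolerating children that are
  // already gone (NoNodeException), in the spirit of the log's
  // "Node ... already deleted, retry=false" handling. Grandchildren, if any,
  // would need a recursive pass first.
  static void clearChildren(ZooKeeper zk, String parent)
      throws KeeperException, InterruptedException {
    List<String> children = zk.getChildren(parent, false);
    for (String child : children) {
      try {
        zk.delete(parent + "/" + child, -1);   // -1 matches any version
      } catch (KeeperException.NoNodeException e) {
        // Already deleted by someone else; nothing to do.
      }
    }
  }

  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59788", 30000, event -> { });
    clearChildren(zk, "/hbase/flush-table-proc/acquired");
    zk.close();
  }
}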
2024-11-14T21:57:33,146 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:33,146 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:57:33,147 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T21:57:33,147 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,60537,1731621451654 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:60537, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:60537, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:60537, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:60537, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:60537, corePoolSize=10, maxPoolSize=10 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:60537, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:60537, corePoolSize=2, maxPoolSize=2 2024-11-14T21:57:33,149 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:60537, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:33,155 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:33,156 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T21:57:33,157 DEBUG [PEWorker-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:33,157 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef788a4cade8:37019 2024-11-14T21:57:33,157 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:57:33,157 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T21:57:33,157 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T21:57:33,157 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T21:57:33,158 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,60537,1731621451654 with port=37019, startcode=1731621451655 2024-11-14T21:57:33,158 DEBUG [RS:0;ef788a4cade8:37019 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:57:33,162 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621481861 2024-11-14T21:57:33,163 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:57:33,163 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:57:33,163 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:57:33,163 INFO 
[master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:57:33,163 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:57:33,163 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:57:33,167 INFO [HMaster-EventLoopGroup-11-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57953, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:57:33,168 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=60537 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:33,169 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:33,169 WARN [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-14T21:57:33,182 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
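The log entries just above show the region server's registration loop: `reportForDuty` hits a `ServerNotRunningYetException` while the master is still starting, logs "sleeping 100 ms and then retrying", and tries again. As a minimal, illustrative sketch of that retry-until-up pattern (JDK only; the method names, the 500 ms "startup" window, and the session string are invented for the example and are not taken from the HBase code base):

```java
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative only: a retry loop in the spirit of the
 * "reportForDuty failed; sleeping 100 ms and then retrying" lines above.
 * All names here are stand-ins, not HBase APIs.
 */
public class RetryUntilMasterUp {

    /** Keeps invoking the action until it succeeds or the deadline passes. */
    static <T> T retry(Callable<T> action, long sleepMs, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (true) {
            try {
                return action.call();                 // e.g. a registration RPC
            } catch (Exception e) {                   // e.g. "Server is not running yet"
                if (System.currentTimeMillis() >= deadline) {
                    throw e;                          // give up once the deadline is hit
                }
                TimeUnit.MILLISECONDS.sleep(sleepMs); // back off, then try again
            }
        }
    }

    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // Pretend the "master" only accepts registrations after ~500 ms of uptime.
        String session = retry(() -> {
            if (System.currentTimeMillis() - start < 500) {
                throw new IllegalStateException("Server is not running yet");
            }
            return "sessionid=0xdeadbeef";            // made-up value for the demo
        }, 100, 10_000);
        System.out.println("registered: " + session);
    }
}
```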
2024-11-14T21:57:33,195 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:57:33,195 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:57:33,195 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:57:33,195 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:57:33,195 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:57:33,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741831_1007 (size=1321) 2024-11-14T21:57:33,202 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.large.0-1731621451886,5,FailOnTimeoutGroup] 2024-11-14T21:57:33,206 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.small.0-1731621451887,5,FailOnTimeoutGroup] 2024-11-14T21:57:33,206 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:33,206 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:57:33,207 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:33,207 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:33,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:33,221 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:33,491 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:57:33,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:33,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:33,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:33,599 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T21:57:33,599 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba 2024-11-14T21:57:33,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741832_1008 (size=32) 2024-11-14T21:57:33,721 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:57:34,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:34,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:57:34,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:57:34,011 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:34,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:34,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:57:34,013 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:57:34,013 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:34,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:34,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:57:34,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:57:34,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:34,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:34,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:57:34,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:57:34,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:34,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:34,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:57:34,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740 2024-11-14T21:57:34,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740 2024-11-14T21:57:34,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-14T21:57:34,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:57:34,022 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:57:34,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:57:34,026 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:34,026 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65656701, jitterRate=-0.021638914942741394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:34,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731621451932Initializing all the Stores at 1731621451933 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621451934 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621451935 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621451936 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621451937 (+1 ms)Cleaning up temporary data from old regions at 1731621451938 (+1 ms)Region opened successfully at 1731621451941 (+3 ms) 2024-11-14T21:57:34,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:57:34,027 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:57:34,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:57:34,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-14T21:57:34,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): 
Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:57:34,028 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:34,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621451943Disabling compacts and flushes for region at 1731621451944 (+1 ms)Disabling writes for close at 1731621451947 (+3 ms)Writing region close event to WAL at 1731621451948 (+1 ms)Closed at 1731621451950 (+2 ms) 2024-11-14T21:57:34,030 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:34,030 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T21:57:34,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T21:57:34,032 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:57:34,034 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T21:57:34,050 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,60537,1731621451654 with port=37019, startcode=1731621451655 2024-11-14T21:57:34,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=60537 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=60537 {}] master.ServerManager(517): Registering regionserver=ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,054 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba 2024-11-14T21:57:34,054 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33187 2024-11-14T21:57:34,054 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T21:57:34,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:57:34,058 DEBUG [RS:0;ef788a4cade8:37019 {}] zookeeper.ZKUtil(111): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,058 WARN [RS:0;ef788a4cade8:37019 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T21:57:34,058 INFO [RS:0;ef788a4cade8:37019 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:34,058 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,058 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef788a4cade8,37019,1731621451655] 2024-11-14T21:57:34,061 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T21:57:34,063 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T21:57:34,063 INFO [RS:0;ef788a4cade8:37019 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T21:57:34,063 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,064 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T21:57:34,064 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T21:57:34,065 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
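The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries above describe periodic background tasks scheduled at a fixed rate. A minimal JDK-only sketch of that scheduling shape (the task body and the 1000 ms period are chosen for the example; this is not HBase's ChoreService implementation):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative only: a fixed-rate periodic task comparable to the
 * "period=1000, unit=MILLISECONDS" chores logged above.
 */
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Run a checker-style task once per second, like period=1000 above.
        scheduler.scheduleAtFixedRate(
            () -> System.out.println("chore tick at " + System.currentTimeMillis()),
            0, 1000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(3);   // let a few ticks happen
        scheduler.shutdownNow();
    }
}
```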
2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:57:34,065 DEBUG [RS:0;ef788a4cade8:37019 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef788a4cade8:0, corePoolSize=3, maxPoolSize=3 2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
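The "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" entries above correspond to named worker pools with fixed core and maximum sizes. A plain-JDK sketch of a pool configured that way, with a naming thread factory (the pool name is made up for the example and this is not the HBase ExecutorService class itself):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Illustrative only: a JDK pool shaped like the
 * "corePoolSize=N, maxPoolSize=N" executor lines above.
 */
public class NamedPoolSketch {

    static ThreadPoolExecutor newNamedPool(String name, int core, int max) {
        ThreadFactory tf = new ThreadFactory() {
            private final AtomicInteger seq = new AtomicInteger();
            @Override public Thread newThread(Runnable r) {
                Thread t = new Thread(r, name + "-" + seq.getAndIncrement());
                t.setDaemon(true);   // background worker threads
                return t;
            }
        };
        // Unbounded queue: with core == max (as in the log) only core threads are created.
        return new ThreadPoolExecutor(core, max, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(), tf);
    }

    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = newNamedPool("RS_OPEN_REGION-example", 1, 1);
        pool.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}
```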
2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,067 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,37019,1731621451655-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:57:34,083 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T21:57:34,083 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,37019,1731621451655-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,083 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,083 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.Replication(171): ef788a4cade8,37019,1731621451655 started 2024-11-14T21:57:34,098 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:34,098 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1482): Serving as ef788a4cade8,37019,1731621451655, RpcServer on ef788a4cade8/172.17.0.2:37019, sessionid=0x100ac7206040001 2024-11-14T21:57:34,098 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T21:57:34,098 DEBUG [RS:0;ef788a4cade8:37019 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,098 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,37019,1731621451655' 2024-11-14T21:57:34,098 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef788a4cade8,37019,1731621451655 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef788a4cade8,37019,1731621451655' 2024-11-14T21:57:34,099 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T21:57:34,100 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T21:57:34,100 DEBUG [RS:0;ef788a4cade8:37019 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T21:57:34,100 INFO [RS:0;ef788a4cade8:37019 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T21:57:34,100 INFO [RS:0;ef788a4cade8:37019 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T21:57:34,184 WARN [ef788a4cade8:60537 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:34,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:34,221 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:34,721 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 2024-11-14T21:57:35,185 WARN [ef788a4cade8:60537 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:35,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:35,222 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:57:35,224 INFO [RS:0;ef788a4cade8:37019 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:57:35,226 INFO [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C37019%2C1731621451655, suffix=, logDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655, archiveDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/oldWALs, maxLogs=32 2024-11-14T21:57:35,243 DEBUG [RS:0;ef788a4cade8:37019 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655/ef788a4cade8%2C37019%2C1731621451655.1731621452154, exclude list is [], retry=0 2024-11-14T21:57:35,246 DEBUG [HRegionServer-EventLoopGroup-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43815,DS-61162a1d-47a5-4e3d-926a-61222a812f1f,DISK] 2024-11-14T21:57:35,251 INFO [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655/ef788a4cade8%2C37019%2C1731621451655.1731621452154 2024-11-14T21:57:35,251 DEBUG [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33213:33213)] 2024-11-14T21:57:35,435 DEBUG [ef788a4cade8:60537 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T21:57:35,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef788a4cade8,37019,1731621451655 2024-11-14T21:57:35,437 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,37019,1731621451655, state=OPENING 2024-11-14T21:57:35,439 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T21:57:35,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:35,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:35,447 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:57:35,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:35,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:35,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1588230740, server=ef788a4cade8,37019,1731621451655}] 2024-11-14T21:57:35,452 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T21:57:35,454 INFO [HRegionServer-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33069, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T21:57:35,459 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T21:57:35,459 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:35,459 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-14T21:57:35,461 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C37019%2C1731621451655.meta, suffix=.meta, logDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655, archiveDir=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/oldWALs, maxLogs=32 2024-11-14T21:57:35,475 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655/ef788a4cade8%2C37019%2C1731621451655.meta.1731621452289.meta, exclude list is [], retry=0 2024-11-14T21:57:35,478 DEBUG [HRegionServer-EventLoopGroup-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43815,DS-61162a1d-47a5-4e3d-926a-61222a812f1f,DISK] 2024-11-14T21:57:35,480 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/WALs/ef788a4cade8,37019,1731621451655/ef788a4cade8%2C37019%2C1731621451655.meta.1731621452289.meta 2024-11-14T21:57:35,481 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33213:33213)] 2024-11-14T21:57:35,481 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:57:35,481 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T21:57:35,482 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T21:57:35,482 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T21:57:35,482 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T21:57:35,482 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:35,482 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T21:57:35,482 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T21:57:35,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:57:35,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:57:35,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:35,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:35,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:57:35,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:57:35,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:35,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:35,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:57:35,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:57:35,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:35,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:35,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:57:35,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:57:35,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:35,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:35,493 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:57:35,494 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740 2024-11-14T21:57:35,495 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740 2024-11-14T21:57:35,497 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:57:35,497 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:57:35,497 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T21:57:35,499 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:57:35,500 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61622731, jitterRate=-0.08174975216388702}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:35,500 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T21:57:35,501 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731621452298Writing region info on filesystem at 1731621452299 (+1 ms)Initializing all the Stores at 1731621452300 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621452301 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621452302 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621452303 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621452304 (+1 ms)Cleaning up temporary data from old regions at 1731621452307 (+3 ms)Running coprocessor post-open hooks at 1731621452309 (+2 ms)Region opened successfully at 1731621452311 (+2 ms) 2024-11-14T21:57:35,502 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731621452253 2024-11-14T21:57:35,505 DEBUG [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T21:57:35,505 INFO [RS_OPEN_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T21:57:35,506 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef788a4cade8,37019,1731621451655 2024-11-14T21:57:35,508 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef788a4cade8,37019,1731621451655, state=OPEN 2024-11-14T21:57:35,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:57:35,509 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef788a4cade8,37019,1731621451655 2024-11-14T21:57:35,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T21:57:35,509 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:35,509 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T21:57:35,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T21:57:35,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef788a4cade8,37019,1731621451655 in 139 msec 2024-11-14T21:57:35,517 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T21:57:35,517 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 430 msec 2024-11-14T21:57:35,518 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:35,519 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T21:57:35,521 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T21:57:35,521 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef788a4cade8,37019,1731621451655, seqNum=-1] 2024-11-14T21:57:35,521 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T21:57:35,523 INFO [HRegionServer-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42637, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T21:57:35,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 709 msec 2024-11-14T21:57:35,530 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731621452541, completionTime=-1 2024-11-14T21:57:35,530 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T21:57:35,530 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731621512577 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731621572578 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef788a4cade8:60537, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,533 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,534 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,536 DEBUG [master/ef788a4cade8:60537.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.010sec 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T21:57:35,539 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T21:57:35,546 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T21:57:35,546 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T21:57:35,546 INFO [master/ef788a4cade8:60537:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef788a4cade8,60537,1731621451654-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:35,722 DEBUG [Time-limited test {}] regionserver.TestRegionServerReportForDuty(349): Waiting for master to come online ... 
2024-11-14T21:57:35,722 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T21:57:35,722 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=8106092, stopped=false 2024-11-14T21:57:35,722 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef788a4cade8,60537,1731621451654 2024-11-14T21:57:35,725 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:57:35,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:35,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:35,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:35,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:35,725 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T21:57:35,725 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.tearDown(TestRegionServerReportForDuty.java:91) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:35,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:35,726 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:35,726 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,37019,1731621451655' ***** 2024-11-14T21:57:35,726 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T21:57:35,726 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T21:57:35,726 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,37019,1731621451655 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:35,726 INFO [RS:0;ef788a4cade8:37019 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef788a4cade8:37019. 
2024-11-14T21:57:35,726 DEBUG [RS:0;ef788a4cade8:37019 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:35,726 DEBUG [RS:0;ef788a4cade8:37019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:35,727 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T21:57:35,727 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T21:57:35,727 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T21:57:35,727 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T21:57:35,727 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T21:57:35,727 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T21:57:35,727 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:57:35,727 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:35,727 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:57:35,727 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:57:35,727 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-14T21:57:35,727 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:57:35,727 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T21:57:35,747 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/.tmp/ns/853b21ec111d4277b79635ce9e1e4c31 is 43, key is default/ns:d/1731621452480/Put/seqid=0 2024-11-14T21:57:35,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741835_1011 (size=5153) 2024-11-14T21:57:35,769 INFO [regionserver/ef788a4cade8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 
2024-11-14T21:57:35,927 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:36,128 DEBUG [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T21:57:36,153 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/.tmp/ns/853b21ec111d4277b79635ce9e1e4c31 2024-11-14T21:57:36,160 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T21:57:36,160 INFO [regionserver/ef788a4cade8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T21:57:36,162 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/.tmp/ns/853b21ec111d4277b79635ce9e1e4c31 as hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/ns/853b21ec111d4277b79635ce9e1e4c31 2024-11-14T21:57:36,170 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/ns/853b21ec111d4277b79635ce9e1e4c31, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T21:57:36,172 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 112ms, sequenceid=6, compaction requested=false 2024-11-14T21:57:36,183 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T21:57:36,184 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T21:57:36,184 INFO [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:36,184 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621452758Running coprocessor pre-close hooks at 1731621452759 (+1 ms)Disabling compacts and flushes for region at 1731621452760 (+1 ms)Disabling writes for close at 1731621452763 (+3 ms)Obtaining lock to block concurrent updates at 1731621452765 (+2 ms)Preparing flush snapshotting stores in 1588230740 at 1731621452766 (+1 ms)Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731621452775 (+9 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731621452779 (+4 ms)Flushing 1588230740/ns: creating writer at 1731621452781 (+2 ms)Flushing 1588230740/ns: appending metadata at 1731621452784 (+3 ms)Flushing 1588230740/ns: 
closing flushed file at 1731621452785 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44275c64: reopening flushed file at 1731621452857 (+72 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 112ms, sequenceid=6, compaction requested=false at 1731621452877 (+20 ms)Writing region close event to WAL at 1731621452878 (+1 ms)Running coprocessor post-close hooks at 1731621452886 (+8 ms)Closed at 1731621452888 (+2 ms) 2024-11-14T21:57:36,184 DEBUG [RS_CLOSE_META-regionserver/ef788a4cade8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:36,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:36,328 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,37019,1731621451655; all regions closed. 
2024-11-14T21:57:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741834_1010 (size=1152) 2024-11-14T21:57:36,334 DEBUG [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/oldWALs 2024-11-14T21:57:36,334 INFO [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C37019%2C1731621451655.meta:.meta(num 1731621452289) 2024-11-14T21:57:36,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741833_1009 (size=93) 2024-11-14T21:57:36,339 DEBUG [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/oldWALs 2024-11-14T21:57:36,339 INFO [RS:0;ef788a4cade8:37019 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL ef788a4cade8%2C37019%2C1731621451655:(num 1731621452154) 2024-11-14T21:57:36,339 DEBUG [RS:0;ef788a4cade8:37019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:36,339 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T21:57:36,340 INFO [RS:0;ef788a4cade8:37019 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:36,340 INFO [RS:0;ef788a4cade8:37019 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T21:57:36,340 INFO [RS:0;ef788a4cade8:37019 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:36,340 INFO [regionserver/ef788a4cade8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T21:57:36,340 INFO [RS:0;ef788a4cade8:37019 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37019 2024-11-14T21:57:36,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef788a4cade8,37019,1731621451655 2024-11-14T21:57:36,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T21:57:36,343 INFO [RS:0;ef788a4cade8:37019 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:36,345 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef788a4cade8,37019,1731621451655] 2024-11-14T21:57:36,347 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef788a4cade8,37019,1731621451655 already deleted, retry=false 2024-11-14T21:57:36,347 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef788a4cade8,37019,1731621451655 expired; onlineServers=0 2024-11-14T21:57:36,347 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,60537,1731621451654' ***** 2024-11-14T21:57:36,347 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T21:57:36,347 INFO [M:0;ef788a4cade8:60537 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:36,347 INFO [M:0;ef788a4cade8:60537 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:36,347 DEBUG [M:0;ef788a4cade8:60537 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T21:57:36,347 DEBUG [M:0;ef788a4cade8:60537 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T21:57:36,347 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T21:57:36,347 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.small.0-1731621451887 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.small.0-1731621451887,5,FailOnTimeoutGroup] 2024-11-14T21:57:36,347 DEBUG [master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.large.0-1731621451886 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:60537:becomeActiveMaster-HFileCleaner.large.0-1731621451886,5,FailOnTimeoutGroup] 2024-11-14T21:57:36,347 INFO [M:0;ef788a4cade8:60537 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:60537 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T21:57:36,347 INFO [M:0;ef788a4cade8:60537 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:36,348 DEBUG [M:0;ef788a4cade8:60537 {}] master.HMaster(1795): Stopping service threads 2024-11-14T21:57:36,348 INFO [M:0;ef788a4cade8:60537 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T21:57:36,348 INFO [M:0;ef788a4cade8:60537 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:57:36,348 INFO [M:0;ef788a4cade8:60537 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T21:57:36,348 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T21:57:36,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:36,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:36,349 DEBUG [M:0;ef788a4cade8:60537 {}] zookeeper.ZKUtil(347): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T21:57:36,349 WARN [M:0;ef788a4cade8:60537 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T21:57:36,350 INFO [M:0;ef788a4cade8:60537 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/.lastflushedseqids 2024-11-14T21:57:36,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741836_1012 (size=99) 2024-11-14T21:57:36,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:36,445 INFO [RS:0;ef788a4cade8:37019 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:36,445 INFO [RS:0;ef788a4cade8:37019 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,37019,1731621451655; zookeeper connection closed. 
2024-11-14T21:57:36,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37019-0x100ac7206040001, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:36,446 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T21:57:36,755 INFO [M:0;ef788a4cade8:60537 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:57:36,756 INFO [M:0;ef788a4cade8:60537 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:57:36,756 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:36,756 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:36,756 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:36,756 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-14T21:57:36,756 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:36,756 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.53 KB heapSize=11.13 KB 2024-11-14T21:57:36,776 DEBUG [M:0;ef788a4cade8:60537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78198f9020a1470199fa73dd2e6ca18f is 82, key is hbase:meta,,1/info:regioninfo/1731621452355/Put/seqid=0 2024-11-14T21:57:36,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741837_1013 (size=5672) 2024-11-14T21:57:37,181 INFO [M:0;ef788a4cade8:60537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78198f9020a1470199fa73dd2e6ca18f 2024-11-14T21:57:37,204 DEBUG [M:0;ef788a4cade8:60537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5864e2436334d3f8658788a9e38d57a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731621452530/Put/seqid=0 2024-11-14T21:57:37,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741838_1014 (size=5275) 2024-11-14T21:57:37,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:37,610 INFO [M:0;ef788a4cade8:60537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.98 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5864e2436334d3f8658788a9e38d57a 2024-11-14T21:57:37,634 DEBUG [M:0;ef788a4cade8:60537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/541c0211ee114e0699fea58a4adb5eff is 69, key is ef788a4cade8,37019,1731621451655/rs:state/1731621452017/Put/seqid=0 2024-11-14T21:57:37,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741839_1015 (size=5156) 2024-11-14T21:57:38,040 INFO [M:0;ef788a4cade8:60537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/541c0211ee114e0699fea58a4adb5eff 2024-11-14T21:57:38,052 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78198f9020a1470199fa73dd2e6ca18f as hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78198f9020a1470199fa73dd2e6ca18f 2024-11-14T21:57:38,059 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78198f9020a1470199fa73dd2e6ca18f, entries=8, sequenceid=28, filesize=5.5 K 2024-11-14T21:57:38,060 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5864e2436334d3f8658788a9e38d57a as hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c5864e2436334d3f8658788a9e38d57a 2024-11-14T21:57:38,067 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c5864e2436334d3f8658788a9e38d57a, entries=3, sequenceid=28, filesize=5.2 K 2024-11-14T21:57:38,068 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/541c0211ee114e0699fea58a4adb5eff as hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/541c0211ee114e0699fea58a4adb5eff 2024-11-14T21:57:38,074 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33187/user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/541c0211ee114e0699fea58a4adb5eff, entries=1, sequenceid=28, filesize=5.0 K 2024-11-14T21:57:38,076 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=28, compaction requested=false 2024-11-14T21:57:38,079 INFO [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:38,079 DEBUG [M:0;ef788a4cade8:60537 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621452926Disabling compacts and flushes for region at 1731621452927 (+1 ms)Disabling writes for close at 1731621452930 (+3 ms)Obtaining lock to block concurrent updates at 1731621452932 (+2 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621452933 (+1 ms)Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7712, getHeapSize=11328, getOffHeapSize=0, getCellsCount=35 at 1731621452942 (+9 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731621452946 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731621452948 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731621452950 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731621452951 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731621452986 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731621452990 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731621452991 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731621453026 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731621453028 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731621453029 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@439d5c56: reopening flushed file at 1731621453063 (+34 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d2478a0: reopening flushed file at 1731621453072 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34c20df2: reopening flushed file at 1731621453081 (+9 ms)Finished flush of dataSize ~7.53 KB/7712, heapSize ~10.83 KB/11088, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=28, compaction requested=false at 1731621453098 (+17 ms)Writing region close event to WAL at 1731621453099 (+1 ms)Closed at 1731621453101 (+2 ms) 2024-11-14T21:57:38,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43815 is added to blk_1073741830_1006 (size=10075) 2024-11-14T21:57:38,082 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/3ce363e6-46d1-65d8-e928-056c5e5c6fba/MasterData/WALs/ef788a4cade8,60537,1731621451654/ef788a4cade8%2C60537%2C1731621451654.1731621451746 not finished, retry = 0 2024-11-14T21:57:38,183 INFO [M:0;ef788a4cade8:60537 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T21:57:38,183 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:57:38,183 INFO [M:0;ef788a4cade8:60537 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:60537 2024-11-14T21:57:38,183 INFO [M:0;ef788a4cade8:60537 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:38,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-14T21:57:38,285 INFO [M:0;ef788a4cade8:60537 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:38,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:60537-0x100ac7206040000, quorum=127.0.0.1:59788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:38,288 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T21:57:38,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19b7eb5d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:38,291 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@251173d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:38,291 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:38,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d67faf4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:38,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f496114{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:38,293 WARN [BP-103333673-172.17.0.2-1731621450597 heartbeating to localhost/127.0.0.1:33187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T21:57:38,293 WARN [BP-103333673-172.17.0.2-1731621450597 heartbeating to localhost/127.0.0.1:33187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-103333673-172.17.0.2-1731621450597 (Datanode Uuid 8fb87c64-62a1-444c-a361-1ba6d2b56f6e) service to localhost/127.0.0.1:33187 2024-11-14T21:57:38,293 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T21:57:38,293 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T21:57:38,294 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/data/data1/current/BP-103333673-172.17.0.2-1731621450597 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:38,294 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/cluster_53f5a298-07f1-d7b6-75d6-ea18843da0cd/data/data2/current/BP-103333673-172.17.0.2-1731621450597 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:38,294 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T21:57:38,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11893ef0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:38,301 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5152a519{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:38,301 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:38,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6422d591{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:38,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aae06e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:38,313 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.TestRegionServerReportForDuty#testReportForDutyWithEnvironmentEdge Thread=98 (was 85) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33187 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-8-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-8-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Client (1512999374) connection to localhost/127.0.0.1:33187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-9-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-9-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-9-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-8-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:33187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=415 (was 396) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=329 (was 340), ProcessCount=11 (was 11), AvailableMemoryMB=2456 (was 2652) 2024-11-14T21:57:38,321 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.TestRegionServerReportForDuty#testReportForDutyBackoff Thread=98, OpenFileDescriptor=415, MaxFileDescriptor=1048576, SystemLoadAverage=329, ProcessCount=11, AvailableMemoryMB=2454 2024-11-14T21:57:38,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir so I do NOT create it in target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa 2024-11-14T21:57:38,337 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.log.dir Erasing configuration value by system value. 2024-11-14T21:57:38,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.tmp.dir so I do NOT create it in target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa 2024-11-14T21:57:38,337 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/58bf6ba3-87ab-f845-9cbb-3318a545432d/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-14T21:57:38,337 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa 2024-11-14T21:57:38,337 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401, deleteOnExit=true 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/test.cache.data in system properties and HBase conf 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.log.dir in system properties and HBase conf 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T21:57:38,338 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T21:57:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/nfs.dump.dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/java.io.tmpdir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T21:57:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T21:57:38,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T21:57:38,443 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:38,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:38,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:38,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:38,450 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T21:57:38,451 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:38,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f02ba49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:38,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5678fd40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:38,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ebf5984{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/java.io.tmpdir/jetty-localhost-45677-hadoop-hdfs-3_4_1-tests_jar-_-any-3637057243577414362/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:38,589 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ba18d72{HTTP/1.1, (http/1.1)}{localhost:45677} 2024-11-14T21:57:38,589 INFO [Time-limited test {}] server.Server(415): Started @346030ms 2024-11-14T21:57:38,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T21:57:38,703 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T21:57:38,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T21:57:38,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T21:57:38,705 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T21:57:38,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b44fd2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.log.dir/,AVAILABLE} 2024-11-14T21:57:38,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@775901e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T21:57:38,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1be53e99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/java.io.tmpdir/jetty-localhost-35413-hadoop-hdfs-3_4_1-tests_jar-_-any-17697865295448076829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:38,864 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@368f4caf{HTTP/1.1, (http/1.1)}{localhost:35413} 2024-11-14T21:57:38,864 INFO [Time-limited test {}] server.Server(415): Started @346305ms 2024-11-14T21:57:38,865 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T21:57:38,991 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/data/data1/current/BP-1694646772-172.17.0.2-1731621458374/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:38,991 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/data/data2/current/BP-1694646772-172.17.0.2-1731621458374/current, will proceed with Du for space computation calculation, 2024-11-14T21:57:39,019 WARN [Thread-644 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T21:57:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c2ac9699c478219 with lease ID 0x4819da9db4ef06bb: Processing first storage report for DS-a0f7484d-d4e5-4f02-b7e9-7cdc070fff4d from datanode DatanodeRegistration(127.0.0.1:42991, datanodeUuid=6bae18b0-cb44-4e37-bea3-36f0ea6ec33c, infoPort=35971, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1421821199;c=1731621458374) 2024-11-14T21:57:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c2ac9699c478219 with lease ID 0x4819da9db4ef06bb: from storage DS-a0f7484d-d4e5-4f02-b7e9-7cdc070fff4d node DatanodeRegistration(127.0.0.1:42991, datanodeUuid=6bae18b0-cb44-4e37-bea3-36f0ea6ec33c, infoPort=35971, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1421821199;c=1731621458374), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:57:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c2ac9699c478219 with lease ID 0x4819da9db4ef06bb: Processing first storage report for DS-1a49d151-f7f9-49bd-acde-58eb2a15a248 from datanode DatanodeRegistration(127.0.0.1:42991, datanodeUuid=6bae18b0-cb44-4e37-bea3-36f0ea6ec33c, infoPort=35971, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1421821199;c=1731621458374) 2024-11-14T21:57:39,022 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c2ac9699c478219 with lease ID 0x4819da9db4ef06bb: from storage DS-1a49d151-f7f9-49bd-acde-58eb2a15a248 node DatanodeRegistration(127.0.0.1:42991, datanodeUuid=6bae18b0-cb44-4e37-bea3-36f0ea6ec33c, infoPort=35971, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=1421821199;c=1731621458374), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T21:57:39,104 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa 
2024-11-14T21:57:39,105 DEBUG [Time-limited test {}] zookeeper.MiniZooKeeperCluster(246): Failed binding ZK Server to client port: 59240 java.net.BindException: Address already in use at sun.nio.ch.Net.bind0(Native Method) ~[?:?] at sun.nio.ch.Net.bind(Net.java:555) ~[?:?] at sun.nio.ch.ServerSocketChannelImpl.netBind(ServerSocketChannelImpl.java:337) ~[?:?] at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:294) ~[?:?] at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:89) ~[?:?] at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:81) ~[?:?] at org.apache.zookeeper.server.NIOServerCnxnFactory.configure(NIOServerCnxnFactory.java:662) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:109) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:105) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster.startup(MiniZooKeeperCluster.java:242) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseZKTestingUtil.startMiniZKCluster(HBaseZKTestingUtil.java:131) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT-tests.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseZKTestingUtil.startMiniZKCluster(HBaseZKTestingUtil.java:104) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT-tests.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty.setUp(TestRegionServerReportForDuty.java:84) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T21:57:39,108 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/zookeeper_0, clientPort=59241, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T21:57:39,109 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59241 2024-11-14T21:57:39,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741825_1001 (size=7) 2024-11-14T21:57:39,130 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd with version=8 2024-11-14T21:57:39,132 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T21:57:39,133 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer 
allocation 2024-11-14T21:57:39,134 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42075 2024-11-14T21:57:39,135 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42075 connecting to ZooKeeper ensemble=127.0.0.1:59241 2024-11-14T21:57:39,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420750x0, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:39,141 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42075-0x100ac7224c30000 connected 2024-11-14T21:57:39,185 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:39,191 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd, hbase.cluster.distributed=false 2024-11-14T21:57:39,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:39,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42075 2024-11-14T21:57:39,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42075 2024-11-14T21:57:39,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42075 2024-11-14T21:57:39,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42075 2024-11-14T21:57:39,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42075 2024-11-14 21:57:39,201 Time-limited test DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 21:57:39,201 Time-limited test DEBUG Starting WriterManager java.io.StringWriter@1d3887d4.false 2024-11-14T21:57:39,203 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef788a4cade8:0 server-side Connection retries=45 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T21:57:39,203 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T21:57:39,203 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T21:57:39,207 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34821 2024-11-14T21:57:39,208 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34821 connecting to ZooKeeper ensemble=127.0.0.1:59241 2024-11-14T21:57:39,208 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,210 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348210x0, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T21:57:39,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-14T21:57:39,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34821-0x100ac7224c30001 connected 2024-11-14T21:57:39,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:39,216 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef788a4cade8:42075 2024-11-14T21:57:39,216 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T21:57:39,216 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef788a4cade8,42075,1731621453115 2024-11-14T21:57:39,217 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T21:57:39,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:39,218 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:57:39,218 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef788a4cade8,42075,1731621453115 2024-11-14T21:57:39,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:39,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:39,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:39,220 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T21:57:39,220 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T21:57:39,221 INFO 
[master/ef788a4cade8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef788a4cade8,42075,1731621453115 from backup master directory 2024-11-14T21:57:39,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef788a4cade8,42075,1731621453115 2024-11-14T21:57:39,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:39,223 WARN [master/ef788a4cade8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T21:57:39,223 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef788a4cade8,42075,1731621453115 2024-11-14T21:57:39,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T21:57:39,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34821 2024-11-14T21:57:39,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34821 2024-11-14T21:57:39,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34821 2024-11-14T21:57:39,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34821 2024-11-14T21:57:39,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34821 2024-11-14T21:57:39,252 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/hbase.id] with ID: de6080e7-56e4-44e4-94f5-11e2fba1aa85 2024-11-14T21:57:39,252 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/.tmp/hbase.id 2024-11-14T21:57:39,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741826_1002 (size=42) 2024-11-14T21:57:39,674 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/.tmp/hbase.id]:[hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/hbase.id] 2024-11-14T21:57:39,690 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T21:57:39,690 INFO 
[master/ef788a4cade8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T21:57:39,692 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-14T21:57:39,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:39,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:39,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741827_1003 (size=196) 2024-11-14T21:57:40,101 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T21:57:40,102 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T21:57:40,103 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-14T21:57:40,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741828_1004 (size=1189) 2024-11-14T21:57:40,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:40,512 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store 2024-11-14T21:57:40,519 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741829_1005 (size=34) 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:40,921 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:40,921 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:40,921 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621453225Disabling compacts and flushes for region at 1731621453226 (+1 ms)Disabling writes for close at 1731621453229 (+3 ms)Writing region close event to WAL at 1731621453230 (+1 ms)Closed at 1731621453232 (+2 ms) 2024-11-14T21:57:40,922 WARN [master/ef788a4cade8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/.initializing 2024-11-14T21:57:40,922 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/WALs/ef788a4cade8,42075,1731621453115 2024-11-14T21:57:40,924 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-14T21:57:40,926 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef788a4cade8%2C42075%2C1731621453115, suffix=, logDir=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/WALs/ef788a4cade8,42075,1731621453115, archiveDir=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/oldWALs, maxLogs=10 2024-11-14T21:57:40,942 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/WALs/ef788a4cade8,42075,1731621453115/ef788a4cade8%2C42075%2C1731621453115.1731621453235, exclude list is [], retry=0 2024-11-14T21:57:40,947 DEBUG [HRegionServer-EventLoopGroup-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in 
unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42991,DS-a0f7484d-d4e5-4f02-b7e9-7cdc070fff4d,DISK] 2024-11-14T21:57:40,950 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/WALs/ef788a4cade8,42075,1731621453115/ef788a4cade8%2C42075%2C1731621453115.1731621453235 2024-11-14T21:57:40,950 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35971:35971)] 2024-11-14T21:57:40,950 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T21:57:40,951 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:40,951 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,951 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T21:57:40,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:40,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:40,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 
2024-11-14T21:57:40,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T21:57:40,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:40,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:40,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T21:57:40,959 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:40,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T21:57:40,961 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:40,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T21:57:40,961 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,962 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,962 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,964 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,964 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,965 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-14T21:57:40,966 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T21:57:40,968 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:40,969 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71725254, jitterRate=0.06878957152366638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:40,969 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731621453245Initializing all the Stores at 1731621453246 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621453247 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621453248 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621453249 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621453250 (+1 ms)Cleaning up temporary data from old regions at 1731621453251 (+1 ms)Region opened successfully at 1731621453254 (+3 ms) 2024-11-14T21:57:40,969 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T21:57:40,973 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38f6605b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:40,974 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-14T21:57:40,974 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T21:57:40,974 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T21:57:40,974 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T21:57:40,975 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T21:57:40,975 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T21:57:40,975 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T21:57:40,977 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T21:57:40,978 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T21:57:40,980 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T21:57:40,980 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T21:57:40,981 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T21:57:40,982 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T21:57:40,983 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T21:57:40,984 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T21:57:40,984 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T21:57:40,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:40,987 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T21:57:40,988 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T21:57:40,989 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T21:57:40,992 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T21:57:40,993 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T21:57:40,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:40,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:40,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:40,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:40,995 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef788a4cade8,42075,1731621453115, sessionid=0x100ac7224c30000, setting cluster-up flag (Was=false) 2024-11-14 21:57:40,998 RS:0;ef788a4cade8:34821 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:40,998 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(746): ClusterId : de6080e7-56e4-44e4-94f5-11e2fba1aa85 2024-11-14T21:57:40,998 DEBUG [RS:0;ef788a4cade8:34821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T21:57:40,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:40,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:41,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:41,004 DEBUG [RS:0;ef788a4cade8:34821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T21:57:41,004 DEBUG [RS:0;ef788a4cade8:34821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T21:57:41,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:41,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T21:57:41,005 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T21:57:41,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:41,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:41,008 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,42075,1731621453115 2024-11-14T21:57:41,014 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T21:57:41,014 DEBUG [RS:0;ef788a4cade8:34821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T21:57:41,014 DEBUG [RS:0;ef788a4cade8:34821 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73aaaecf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef788a4cade8/172.17.0.2:0 2024-11-14T21:57:41,018 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef788a4cade8,42075,1731621453115 2024-11-14T21:57:41,020 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T21:57:41,022 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:41,022 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T21:57:41,022 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, 
PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T21:57:41,023 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef788a4cade8,42075,1731621453115 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef788a4cade8:0, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef788a4cade8:0, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef788a4cade8:0, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef788a4cade8:0, corePoolSize=5, maxPoolSize=5 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef788a4cade8:0, corePoolSize=10, maxPoolSize=10 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef788a4cade8:0, corePoolSize=2, maxPoolSize=2 2024-11-14T21:57:41,024 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef788a4cade8:0, corePoolSize=1, maxPoolSize=1 2024-11-14T21:57:41,030 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef788a4cade8:34821 2024-11-14T21:57:41,030 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T21:57:41,030 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14 21:57:41,030 RS:0;ef788a4cade8:34821 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:41,030 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731621483341 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T21:57:41,031 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14 21:57:41,031 RS:0;ef788a4cade8:34821 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:41,031 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:41,032 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T21:57:41,032 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T21:57:41,032 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T21:57:41,032 DEBUG [RS:0;ef788a4cade8:34821 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T21:57:41,032 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:41,032 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T21:57:41,032 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T21:57:41,032 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T21:57:41,033 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.large.0-1731621453359,5,FailOnTimeoutGroup] 2024-11-14T21:57:41,033 DEBUG [master/ef788a4cade8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.small.0-1731621453360,5,FailOnTimeoutGroup] 2024-11-14T21:57:41,033 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:41,033 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T21:57:41,033 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:41,033 INFO [master/ef788a4cade8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T21:57:41,034 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:41,034 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T21:57:41,034 INFO [NeverInitializedMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44117, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T21:57:41,035 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14 21:57:41,036 RS:0;ef788a4cade8:34821 ERROR Attempted to append to non-started appender test 2024-11-14T21:57:41,035 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:41,036 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-14T21:57:41,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741831_1007 (size=1321) 2024-11-14T21:57:41,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T21:57:41,442 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T21:57:41,442 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd 2024-11-14T21:57:41,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741832_1008 (size=32) 2024-11-14T21:57:41,861 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T21:57:41,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T21:57:41,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T21:57:41,865 
DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:41,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:41,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T21:57:41,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T21:57:41,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:41,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:41,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T21:57:41,869 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T21:57:41,869 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:41,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-11-14T21:57:41,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T21:57:41,871 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T21:57:41,871 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T21:57:41,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T21:57:41,871 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T21:57:41,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/data/hbase/meta/1588230740 2024-11-14T21:57:41,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/data/hbase/meta/1588230740 2024-11-14T21:57:41,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T21:57:41,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T21:57:41,875 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-14T21:57:41,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T21:57:41,878 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T21:57:41,879 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70386870, jitterRate=0.04884609580039978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T21:57:41,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731621453419Initializing all the Stores at 1731621453420 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621453421 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621453422 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731621453423 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731621453424 (+1 ms)Cleaning up temporary data from old regions at 1731621453425 (+1 ms)Region opened successfully at 1731621453429 (+4 ms) 2024-11-14T21:57:41,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T21:57:41,879 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T21:57:41,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T21:57:41,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-14T21:57:41,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T21:57:41,880 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T21:57:41,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731621453431Disabling compacts and flushes for region at 1731621453432 (+1 ms)Disabling writes 
for close at 1731621453435 (+3 ms)Writing region close event to WAL at 1731621453436 (+1 ms)Closed at 1731621453438 (+2 ms) 2024-11-14T21:57:41,882 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T21:57:41,882 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T21:57:41,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T21:57:41,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T21:57:41,884 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T21:57:41,915 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:41,915 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:41,916 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:41,916 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-14T21:57:42,035 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:42,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:43,035 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:43,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:43,441 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:43,442 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:43,443 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:43,443 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 400 ms and then retrying. 2024-11-14T21:57:44,036 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:44,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:44,967 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:44,968 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:44,968 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:44,969 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 800 ms and then retrying. 2024-11-14T21:57:45,036 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
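Note: the repeated Close-WAL-Writer-0 warnings above all bottom out in the same root cause, java.io.IOException: Filesystem closed, thrown by DFSClient.checkOpen once the underlying HDFS client has been shut down; RecoverLeaseFSUtils keeps reflectively calling isFileClosed through that dead handle and logs the failure roughly once per second. The snippet below is only a minimal illustration of that failure mode, not HBase or test code; the NameNode URI and path are placeholders, and running it requires a live HDFS instance.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: using an HDFS FileSystem handle after close() fails inside
// DFSClient.checkOpen with "Filesystem closed", the same root cause as in the traces above.
public class ClosedFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address, not the one from this log.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020/"), conf);
    fs.close();                          // shuts down the underlying DFSClient
    fs.exists(new Path("/some/wal"));    // throws java.io.IOException: Filesystem closed
  }
}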
2024-11-14T21:57:45,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:46,037 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:46,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:46,471 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:46,472 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:46,473 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:46,473 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 1600 ms and then retrying. 2024-11-14T21:57:47,037 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-14T21:57:47,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:48,038 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T21:57:48,193 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 2024-11-14T21:57:48,194 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42075 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.regionserver.TestRegionServerReportForDuty$NeverInitializedMaster.checkServiceStarted(TestRegionServerReportForDuty.java:128) ~[test-classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T21:57:48,195 DEBUG [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T21:57:48,195 WARN [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 3200 ms and then retrying. 2024-11-14T21:57:48,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:49,038 WARN [ef788a4cade8:42075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-14T21:57:49,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36589/user/jenkins/test-data/51071cf1-79be-9c2c-7a64-21ffbd271583/MasterData/WALs/ef788a4cade8,52351,1731621131811/ef788a4cade8%2C52351%2C1731621131811.1731621136326 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor148.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T21:57:49,228 INFO [Time-limited test {}] regionserver.TestRegionServerReportForDuty(154): ClusterId : de6080e7-56e4-44e4-94f5-11e2fba1aa85 About to register with Master. reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 100 ms and then retrying. reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 200 ms and then retrying. reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 400 ms and then retrying. reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 800 ms and then retrying. reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 1600 ms and then retrying. 
reportForDuty to master=ef788a4cade8,42075,1731621453115 with port=34821, startcode=1731621453116 Master is not running yet reportForDuty failed; sleeping 3200 ms and then retrying. 2024-11-14T21:57:49,229 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T21:57:49,229 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=641153581, stopped=false 2024-11-14T21:57:49,229 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef788a4cade8,42075,1731621453115 2024-11-14T21:57:49,229 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master 'ef788a4cade8,42075,1731621453115' ***** 2024-11-14T21:57:49,229 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: OnlineServer=0 right after cluster shutdown set 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:0;ef788a4cade8:42075. 2024-11-14T21:57:49,230 DEBUG [M:0;ef788a4cade8:42075 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:49,230 DEBUG [M:0;ef788a4cade8:42075 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:49,230 DEBUG [M:0;ef788a4cade8:42075 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T21:57:49,230 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
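Note: the reportForDuty retries summarized at 21:57:49,228 show a doubling backoff: the region server sleeps 100, 200, 400, 800, 1600 and then 3200 ms between attempts while the master keeps answering ServerNotRunningYetException. A generic sketch of that kind of retry loop follows; it is not the HRegionServer implementation, and the Attempt interface, the sleep cap and the try count are assumptions made purely for the example.

// Generic exponential-backoff retry sketch, mirroring the 100 ms -> 3200 ms doubling
// sleeps visible in the reportForDuty lines above. Not HBase source.
public class BackoffRetrySketch {
  interface Attempt { boolean tryOnce(); }  // hypothetical single-attempt callback

  static boolean retryWithBackoff(Attempt attempt, long initialSleepMs, long maxSleepMs,
      int maxTries) throws InterruptedException {
    long sleepMs = initialSleepMs;
    for (int i = 0; i < maxTries; i++) {
      if (attempt.tryOnce()) {
        return true;                               // success, stop retrying
      }
      Thread.sleep(sleepMs);                       // back off before the next attempt
      sleepMs = Math.min(sleepMs * 2, maxSleepMs); // double the sleep, up to a cap
    }
    return false;                                  // gave up after maxTries attempts
  }

  public static void main(String[] args) throws InterruptedException {
    // An attempt that always fails makes the sleeps go 100, 200, 400, 800, 1600, 3200 ms.
    retryWithBackoff(() -> false, 100, 3200, 6);
  }
}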
2024-11-14T21:57:49,230 DEBUG [M:0;ef788a4cade8:42075 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T21:57:49,230 DEBUG [master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.small.0-1731621453360 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.small.0-1731621453360,5,FailOnTimeoutGroup] 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] hbase.ChoreService(370): Chore service for: master/ef788a4cade8:0 had [] on shutdown 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:49,230 DEBUG [M:0;ef788a4cade8:42075 {}] master.HMaster(1795): Stopping service threads 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T21:57:49,230 INFO [M:0;ef788a4cade8:42075 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T21:57:49,230 DEBUG [master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.large.0-1731621453359 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef788a4cade8:0:becomeActiveMaster-HFileCleaner.large.0-1731621453359,5,FailOnTimeoutGroup] 2024-11-14T21:57:49,231 INFO [M:0;ef788a4cade8:42075 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T21:57:49,232 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T21:57:49,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:49,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T21:57:49,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:49,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T21:57:49,232 DEBUG [M:0;ef788a4cade8:42075 {}] zookeeper.ZKUtil(347): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T21:57:49,232 WARN [M:0;ef788a4cade8:42075 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T21:57:49,233 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T21:57:49,233 INFO [M:0;ef788a4cade8:42075 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/.lastflushedseqids 2024-11-14T21:57:49,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:49,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T21:57:49,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:49,235 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef788a4cade8,34821,1731621453116' ***** 2024-11-14T21:57:49,235 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T21:57:49,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(959): stopping server ef788a4cade8,34821,1731621453116 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef788a4cade8:34821. 2024-11-14T21:57:49,235 DEBUG [RS:0;ef788a4cade8:34821 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T21:57:49,235 DEBUG [RS:0;ef788a4cade8:34821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(976): stopping server ef788a4cade8,34821,1731621453116; all regions closed. 
2024-11-14T21:57:49,235 DEBUG [RS:0;ef788a4cade8:34821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] hbase.ChoreService(370): Chore service for: regionserver/ef788a4cade8:0 had [] on shutdown 2024-11-14T21:57:49,235 INFO [RS:0;ef788a4cade8:34821 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T21:57:49,236 INFO [RS:0;ef788a4cade8:34821 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34821 2024-11-14T21:57:49,240 DEBUG [RS:0;ef788a4cade8:34821 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/rs/ef788a4cade8,34821,1731621453116 already deleted, retry=false 2024-11-14T21:57:49,240 INFO [RS:0;ef788a4cade8:34821 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741833_1009 (size=1) 2024-11-14T21:57:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:49,342 INFO [RS:0;ef788a4cade8:34821 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:49,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34821-0x100ac7224c30001, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:49,342 INFO [RS:0;ef788a4cade8:34821 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef788a4cade8,34821,1731621453116; zookeeper connection closed. 2024-11-14T21:57:49,343 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T21:57:49,645 INFO [M:0;ef788a4cade8:42075 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T21:57:49,789 DEBUG [ef788a4cade8:42075 {}] assignment.AssignmentManager(2447): Stopped! Dropping assign of 1 queued regions. 2024-11-14T21:57:49,789 INFO [M:0;ef788a4cade8:42075 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T21:57:49,789 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T21:57:49,789 INFO [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:49,789 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:49,789 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-14T21:57:49,789 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T21:57:49,789 INFO [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=2.12 KB heapSize=3.71 KB 2024-11-14T21:57:49,808 DEBUG [M:0;ef788a4cade8:42075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48a1e57f3dd74f57a67edf9f5229a31b is 352, key is \x00\x00\x00\x00\x00\x00\x00\x02/proc:d/1731621453483/Put/seqid=0 2024-11-14T21:57:49,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741834_1010 (size=5546) 2024-11-14T21:57:49,813 INFO [M:0;ef788a4cade8:42075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.12 KB at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48a1e57f3dd74f57a67edf9f5229a31b 2024-11-14T21:57:49,821 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48a1e57f3dd74f57a67edf9f5229a31b as hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/48a1e57f3dd74f57a67edf9f5229a31b 2024-11-14T21:57:49,829 INFO [M:0;ef788a4cade8:42075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42543/user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/48a1e57f3dd74f57a67edf9f5229a31b, entries=2, sequenceid=10, filesize=5.4 K 2024-11-14T21:57:49,831 INFO [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.12 KB/2170, heapSize ~2.95 KB/3016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 63ms, sequenceid=10, compaction requested=false 2024-11-14T21:57:49,832 INFO [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T21:57:49,832 DEBUG [M:0;ef788a4cade8:42075 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731621454056Disabling compacts and flushes for region at 1731621454057 (+1 ms)Disabling writes for close at 1731621454060 (+3 ms)Obtaining lock to block concurrent updates at 1731621454062 (+2 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731621454063 (+1 ms)Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=2170, getHeapSize=3736, getOffHeapSize=0, getCellsCount=8 at 1731621454072 (+9 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731621454076 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731621454078 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731621454080 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731621454081 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27ea9309: reopening flushed file at 1731621454108 (+27 ms)Finished flush of dataSize ~2.12 KB/2170, heapSize ~2.95 KB/3016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 63ms, sequenceid=10, compaction requested=false at 1731621454125 (+17 ms)Writing region close event to WAL at 1731621454126 (+1 ms)Closed at 1731621454128 (+2 ms) 2024-11-14T21:57:49,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42991 is added to blk_1073741830_1006 (size=3277) 2024-11-14T21:57:49,841 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/7150b8a3-0472-55c6-0041-3a236d0923cd/MasterData/WALs/ef788a4cade8,42075,1731621453115/ef788a4cade8%2C42075%2C1731621453115.1731621453235 not finished, retry = 0 2024-11-14T21:57:49,943 INFO [M:0;ef788a4cade8:42075 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T21:57:49,943 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T21:57:49,943 INFO [M:0;ef788a4cade8:42075 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42075 2024-11-14T21:57:49,943 INFO [M:0;ef788a4cade8:42075 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T21:57:50,045 INFO [M:0;ef788a4cade8:42075 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T21:57:50,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:50,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42075-0x100ac7224c30000, quorum=127.0.0.1:59241, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T21:57:50,048 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T21:57:50,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1be53e99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T21:57:50,051 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@368f4caf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:50,051 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:50,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@775901e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:50,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b44fd2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.log.dir/,STOPPED} 
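Note: as a quick cross-check of the flush record a few lines above, the block report for blk_1073741834_1010 gives size=5546 bytes, which matches the store file's "filesize=5.4 K", and the memstore figures "~2.12 KB/2170" and "~2.95 KB/3016" are the same byte counts divided by 1024. The trivial computation below is illustrative only and not part of the test.

// Illustrative only: reproduce the KB figures printed in the flush log entries above.
public class FlushSizeCheck {
  public static void main(String[] args) {
    System.out.printf("5546 bytes = %.1f K%n", 5546 / 1024.0);   // ~5.4 K  (HFile size)
    System.out.printf("2170 bytes = %.2f KB%n", 2170 / 1024.0);  // ~2.12 KB (dataSize)
    System.out.printf("3016 bytes = %.2f KB%n", 3016 / 1024.0);  // ~2.95 KB (heapSize)
  }
}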
2024-11-14T21:57:50,053 WARN [BP-1694646772-172.17.0.2-1731621458374 heartbeating to localhost/127.0.0.1:42543 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T21:57:50,053 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T21:57:50,053 WARN [BP-1694646772-172.17.0.2-1731621458374 heartbeating to localhost/127.0.0.1:42543 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1694646772-172.17.0.2-1731621458374 (Datanode Uuid 6bae18b0-cb44-4e37-bea3-36f0ea6ec33c) service to localhost/127.0.0.1:42543 2024-11-14T21:57:50,053 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T21:57:50,053 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/data/data1/current/BP-1694646772-172.17.0.2-1731621458374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:50,054 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/cluster_9d650ce1-0416-b842-e293-1b0ba5cd7401/data/data2/current/BP-1694646772-172.17.0.2-1731621458374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T21:57:50,054 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T21:57:50,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ebf5984{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T21:57:50,060 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ba18d72{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T21:57:50,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T21:57:50,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5678fd40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T21:57:50,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f02ba49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc88d65f-cd15-a781-24c6-84a52ae430fa/hadoop.log.dir/,STOPPED} 2024-11-14T21:57:50,072 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.TestRegionServerReportForDuty#testReportForDutyBackoff Thread=106 (was 98) Potentially hanging thread: ActiveMasterInitializationMonitor-1731621453286 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.master.MasterInitializationMonitor.run(MasterInitializationMonitor.java:63) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NeverInitializedMaster-EventLoopGroup-13-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NeverInitializedMaster-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42543 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/ef788a4cade8:0:becomeActiveMaster java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) app//org.apache.hadoop.hbase.master.procedure.InitMetaProcedure.await(InitMetaProcedure.java:192) app//org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1113) app//org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) app//org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) app//org.apache.hadoop.hbase.master.HMaster$$Lambda$372/0x00007f97c492b580.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) app//org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$354/0x00007f97c48dadf0.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42543 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HRegionServer-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42543 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:42543 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-11-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1512999374) connection to localhost/127.0.0.1:42543 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-10-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=432 (was 415) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=354 (was 329) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2310 (was 2454)