2024-11-24 02:49:06,870 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-24 02:49:06,885 main DEBUG Took 0.013275 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-24 02:49:06,886 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-24 02:49:06,886 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-24 02:49:06,887 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-24 02:49:06,889 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,908 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-24 02:49:06,924 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,927 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,927 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,928 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,929 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,930 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,930 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,932 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,933 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,934 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,934 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,935 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,936 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,936 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,936 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,937 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-24 02:49:06,937 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,937 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-24 02:49:06,939 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-24 02:49:06,940 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-24 02:49:06,942 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-24 02:49:06,942 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-24 02:49:06,943 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-24 02:49:06,944 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-24 02:49:06,952 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-24 02:49:06,955 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-24 02:49:06,956 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-24 02:49:06,957 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-24 02:49:06,957 main DEBUG createAppenders(={Console})
2024-11-24 02:49:06,958 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-24 02:49:06,958 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-24 02:49:06,959 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-24 02:49:06,959 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-24 02:49:06,959 main DEBUG OutputStream closed
2024-11-24 02:49:06,960 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-24 02:49:06,960 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-24 02:49:06,960 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-24 02:49:07,041 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-24 02:49:07,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-24 02:49:07,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-24 02:49:07,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-24 02:49:07,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-24 02:49:07,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-24 02:49:07,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-24 02:49:07,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-24 02:49:07,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-24 02:49:07,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-24 02:49:07,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-24 02:49:07,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-24 02:49:07,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-24 02:49:07,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-24 02:49:07,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-24 02:49:07,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-24 02:49:07,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-24 02:49:07,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-24 02:49:07,052 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-24 02:49:07,052 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-24 02:49:07,053 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-24 02:49:07,053 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-24T02:49:07,067 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-24 02:49:07,070 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-24 02:49:07,070 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-24T02:49:07,292 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5
2024-11-24T02:49:07,316 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a, deleteOnExit=true
2024-11-24T02:49:07,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/test.cache.data in system properties and HBase conf
2024-11-24T02:49:07,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.tmp.dir in system properties and HBase conf
2024-11-24T02:49:07,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir in system properties and HBase conf
2024-11-24T02:49:07,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-24T02:49:07,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-24T02:49:07,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-24T02:49:07,402 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-24T02:49:07,492 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-24T02:49:07,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-24T02:49:07,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-24T02:49:07,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-24T02:49:07,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-24T02:49:07,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-24T02:49:07,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-24T02:49:07,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-24T02:49:07,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-24T02:49:07,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-24T02:49:07,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/nfs.dump.dir in system properties and HBase conf
2024-11-24T02:49:07,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/java.io.tmpdir in system properties and HBase conf
2024-11-24T02:49:07,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-24T02:49:07,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-24T02:49:07,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-24T02:49:08,642 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-24T02:49:08,717 INFO [Time-limited test {}] log.Log(170): Logging initialized @2501ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-24T02:49:08,787 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T02:49:08,844 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T02:49:08,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T02:49:08,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T02:49:08,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-24T02:49:08,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T02:49:08,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,AVAILABLE}
2024-11-24T02:49:08,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T02:49:09,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/java.io.tmpdir/jetty-localhost-41519-hadoop-hdfs-3_4_1-tests_jar-_-any-5878979806726054728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T02:49:09,087 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:41519}
2024-11-24T02:49:09,087 INFO [Time-limited test {}] server.Server(415): Started @2872ms
2024-11-24T02:49:09,590 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T02:49:09,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T02:49:09,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T02:49:09,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T02:49:09,598 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T02:49:09,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,AVAILABLE}
2024-11-24T02:49:09,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T02:49:09,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/java.io.tmpdir/jetty-localhost-45449-hadoop-hdfs-3_4_1-tests_jar-_-any-6371638185305655118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:09,698 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:45449}
2024-11-24T02:49:09,699 INFO [Time-limited test {}] server.Server(415): Started @3484ms
2024-11-24T02:49:09,747 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T02:49:09,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T02:49:09,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T02:49:09,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T02:49:09,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T02:49:09,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T02:49:09,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,AVAILABLE}
2024-11-24T02:49:09,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T02:49:09,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/java.io.tmpdir/jetty-localhost-35951-hadoop-hdfs-3_4_1-tests_jar-_-any-7511492697972743529/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:09,974 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:35951}
2024-11-24T02:49:09,974 INFO [Time-limited test {}] server.Server(415): Started @3759ms
2024-11-24T02:49:09,977 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T02:49:10,027 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-24T02:49:10,034 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-24T02:49:10,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-24T02:49:10,042 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-24T02:49:10,042 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-24T02:49:10,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,AVAILABLE}
2024-11-24T02:49:10,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-24T02:49:10,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/java.io.tmpdir/jetty-localhost-38869-hadoop-hdfs-3_4_1-tests_jar-_-any-3429248340370118031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:10,154 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:38869}
2024-11-24T02:49:10,154 INFO [Time-limited test {}] server.Server(415): Started @3939ms
2024-11-24T02:49:10,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-24T02:49:11,588 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data4/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,588 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data1/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,588 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data2/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,588 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data3/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,624 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T02:49:11,624 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T02:49:11,649 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data6/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,649 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data5/current/BP-1181736044-172.17.0.2-1732416548023/current, will proceed with Du for space computation calculation,
2024-11-24T02:49:11,677 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-24T02:49:11,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x979f824a54d346d4 with lease ID 0x13f031aff682b64e: Processing first storage report for DS-f5a58f9f-f65c-40c8-b1a5-1bab33157e2e from datanode DatanodeRegistration(127.0.0.1:46783, datanodeUuid=88ce32c5-a72d-484f-a1b8-a80923091096, infoPort=44347, infoSecurePort=0, ipcPort=33873, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x979f824a54d346d4 with lease ID 0x13f031aff682b64e: from storage DS-f5a58f9f-f65c-40c8-b1a5-1bab33157e2e node DatanodeRegistration(127.0.0.1:46783, datanodeUuid=88ce32c5-a72d-484f-a1b8-a80923091096, infoPort=44347, infoSecurePort=0, ipcPort=33873, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3049209d7cdb2e2 with lease ID 0x13f031aff682b64f: Processing first storage report for DS-c41e35ca-2145-475d-a7f5-63b786089768 from datanode DatanodeRegistration(127.0.0.1:39229, datanodeUuid=7b7cce00-44f4-4dad-8b82-47c20140b8b3, infoPort=42161, infoSecurePort=0, ipcPort=38259, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3049209d7cdb2e2 with lease ID 0x13f031aff682b64f: from storage DS-c41e35ca-2145-475d-a7f5-63b786089768 node DatanodeRegistration(127.0.0.1:39229, datanodeUuid=7b7cce00-44f4-4dad-8b82-47c20140b8b3, infoPort=42161, infoSecurePort=0, ipcPort=38259, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c7ce71b7bfb2ae7 with lease ID 0x13f031aff682b650: Processing first storage report for DS-f7a361fc-7cb8-43a4-8416-814275cf21f9 from datanode DatanodeRegistration(127.0.0.1:41067, datanodeUuid=52734fc8-d62f-4a2b-8532-31f88e73bf9c, infoPort=38515, infoSecurePort=0, ipcPort=40061, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c7ce71b7bfb2ae7 with lease ID 0x13f031aff682b650: from storage DS-f7a361fc-7cb8-43a4-8416-814275cf21f9 node DatanodeRegistration(127.0.0.1:41067, datanodeUuid=52734fc8-d62f-4a2b-8532-31f88e73bf9c, infoPort=38515, infoSecurePort=0, ipcPort=40061, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x979f824a54d346d4 with lease ID 0x13f031aff682b64e: Processing first storage report for DS-6250b04f-9fb3-453c-85db-326c332dc8d0 from datanode DatanodeRegistration(127.0.0.1:46783, datanodeUuid=88ce32c5-a72d-484f-a1b8-a80923091096, infoPort=44347, infoSecurePort=0, ipcPort=33873, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x979f824a54d346d4 with lease ID 0x13f031aff682b64e: from storage DS-6250b04f-9fb3-453c-85db-326c332dc8d0 node DatanodeRegistration(127.0.0.1:46783, datanodeUuid=88ce32c5-a72d-484f-a1b8-a80923091096, infoPort=44347, infoSecurePort=0, ipcPort=33873, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3049209d7cdb2e2 with lease ID 0x13f031aff682b64f: Processing first storage report for DS-03db56c0-92a8-4f54-a942-961f25d97ac5 from datanode DatanodeRegistration(127.0.0.1:39229, datanodeUuid=7b7cce00-44f4-4dad-8b82-47c20140b8b3, infoPort=42161, infoSecurePort=0, ipcPort=38259, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3049209d7cdb2e2 with lease ID 0x13f031aff682b64f: from storage DS-03db56c0-92a8-4f54-a942-961f25d97ac5 node DatanodeRegistration(127.0.0.1:39229, datanodeUuid=7b7cce00-44f4-4dad-8b82-47c20140b8b3, infoPort=42161, infoSecurePort=0, ipcPort=38259, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c7ce71b7bfb2ae7 with lease ID 0x13f031aff682b650: Processing first storage report for DS-77c44d42-6084-43c2-9094-c558a6a325f7 from datanode DatanodeRegistration(127.0.0.1:41067, datanodeUuid=52734fc8-d62f-4a2b-8532-31f88e73bf9c, infoPort=38515, infoSecurePort=0, ipcPort=40061, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023)
2024-11-24T02:49:11,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c7ce71b7bfb2ae7 with lease ID 0x13f031aff682b650: from storage DS-77c44d42-6084-43c2-9094-c558a6a325f7 node DatanodeRegistration(127.0.0.1:41067, datanodeUuid=52734fc8-d62f-4a2b-8532-31f88e73bf9c, infoPort=38515, infoSecurePort=0, ipcPort=40061, storageInfo=lv=-57;cid=testClusterID;nsid=181007860;c=1732416548023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-24T02:49:11,726 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5
2024-11-24T02:49:11,798 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-24T02:49:11,858 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=483, ProcessCount=11, AvailableMemoryMB=11854
2024-11-24T02:49:11,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-24T02:49:11,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-24T02:49:11,966 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/zookeeper_0, clientPort=60751, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-24T02:49:11,975 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60751
2024-11-24T02:49:11,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:11,987 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:12,085 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:12,086 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:12,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1190222563_22 at /127.0.0.1:52346 [Receiving block BP-1181736044-172.17.0.2-1732416548023:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:39229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52346 dst: /127.0.0.1:39229
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:12,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39229 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-24T02:49:12,566 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-24T02:49:12,581 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d with version=8
2024-11-24T02:49:12,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/hbase-staging
2024-11-24T02:49:12,666 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-24T02:49:12,905 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45
2024-11-24T02:49:12,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:12,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:12,923 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T02:49:12,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:12,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T02:49:13,088 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-24T02:49:13,159 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-24T02:49:13,169 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-24T02:49:13,172 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T02:49:13,198 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 54511 (auto-detected)
2024-11-24T02:49:13,200 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-24T02:49:13,221 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41157
2024-11-24T02:49:13,250 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41157 connecting to ZooKeeper ensemble=127.0.0.1:60751
2024-11-24T02:49:13,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:411570x0, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T02:49:13,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41157-0x1016abeb7b70000 connected
2024-11-24T02:49:13,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,716 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T02:49:13,729 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d, hbase.cluster.distributed=false
2024-11-24T02:49:13,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T02:49:13,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41157
2024-11-24T02:49:13,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41157
2024-11-24T02:49:13,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41157
2024-11-24T02:49:13,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41157
2024-11-24T02:49:13,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41157
2024-11-24T02:49:13,857 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45
2024-11-24T02:49:13,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,859 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T02:49:13,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T02:49:13,861 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T02:49:13,864 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T02:49:13,865 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44195
2024-11-24T02:49:13,867 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44195 connecting to ZooKeeper ensemble=127.0.0.1:60751
2024-11-24T02:49:13,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441950x0, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T02:49:13,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44195-0x1016abeb7b70001 connected
2024-11-24T02:49:13,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T02:49:13,907 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T02:49:13,916 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T02:49:13,919 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T02:49:13,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T02:49:13,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44195
2024-11-24T02:49:13,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44195
2024-11-24T02:49:13,928 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44195
2024-11-24T02:49:13,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44195
2024-11-24T02:49:13,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44195
2024-11-24T02:49:13,955 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45
2024-11-24T02:49:13,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,956 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,956 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T02:49:13,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:13,957 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T02:49:13,957 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T02:49:13,958 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T02:49:13,959 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43237
2024-11-24T02:49:13,961 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43237 connecting to ZooKeeper ensemble=127.0.0.1:60751
2024-11-24T02:49:13,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:13,987 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432370x0, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T02:49:13,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432370x0, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T02:49:13,987 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43237-0x1016abeb7b70002 connected
2024-11-24T02:49:13,988 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T02:49:13,993 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T02:49:13,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T02:49:13,998 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T02:49:13,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43237
2024-11-24T02:49:13,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43237
2024-11-24T02:49:14,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43237
2024-11-24T02:49:14,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43237
2024-11-24T02:49:14,003 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43237
2024-11-24T02:49:14,021 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-24T02:49:14,022 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-24T02:49:14,023 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-24T02:49:14,024 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41127
2024-11-24T02:49:14,026 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41127 connecting to ZooKeeper ensemble=127.0.0.1:60751
2024-11-24T02:49:14,028 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:14,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:14,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411270x0, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-24T02:49:14,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411270x0, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T02:49:14,040 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41127-0x1016abeb7b70003 connected
2024-11-24T02:49:14,041 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-24T02:49:14,045 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-24T02:49:14,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-24T02:49:14,048 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T02:49:14,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41127
2024-11-24T02:49:14,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41127
2024-11-24T02:49:14,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41127
2024-11-24T02:49:14,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41127
2024-11-24T02:49:14,061 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41127
2024-11-24T02:49:14,079 DEBUG [M:0;7c69a60bd8f6:41157 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:41157
2024-11-24T02:49:14,081 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,41157,1732416552740
2024-11-24T02:49:14,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,093 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,095 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,41157,1732416552740
2024-11-24T02:49:14,123 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-24T02:49:14,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-24T02:49:14,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-24T02:49:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,124 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,125 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-24T02:49:14,126 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,41157,1732416552740 from backup master directory
2024-11-24T02:49:14,134 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,41157,1732416552740
2024-11-24T02:49:14,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-24T02:49:14,135 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-24T02:49:14,135 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,41157,1732416552740
2024-11-24T02:49:14,137 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-24T02:49:14,139 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-24T02:49:14,199 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/hbase.id] with ID: e5fe6634-2829-4111-859f-e49e67d253c8
2024-11-24T02:49:14,200 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/.tmp/hbase.id
2024-11-24T02:49:14,206 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:14,207 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:14,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1190222563_22 at /127.0.0.1:52370 [Receiving block BP-1181736044-172.17.0.2-1732416548023:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52370 dst: /127.0.0.1:39229
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39229 is added to blk_-9223372036854775776_1004 (size=42)
2024-11-24T02:49:14,623 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-24T02:49:14,624 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/.tmp/hbase.id]:[hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/hbase.id]
2024-11-24T02:49:14,669 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:14,675 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-24T02:49:14,698 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms.
2024-11-24T02:49:14,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41067 is added to blk_-9223372036854775789_1002 (size=7)
2024-11-24T02:49:14,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_-9223372036854775788_1002 (size=7)
2024-11-24T02:49:14,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,796 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:14,809 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-24T02:49:14,809 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-24T02:49:14,809 WARN [IPC Server handler 0 on default port 43341 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-24T02:49:14,809 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-24T02:49:14,816 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] io.Closeables(82): IOException thrown while closing Closeable.
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/.tmp/hbase-hbck.lock could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) ~[hbase-shaded-miscellaneous-4.1.9.jar:4.1.9]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:994) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,836 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-24T02:49:14,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-24T02:49:14,845 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-24T02:49:14,876 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-24T02:49:14,876 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-24T02:49:14,876 WARN [IPC Server handler 0 on default port 43341 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-24T02:49:14,877 WARN [IPC Server handler 0 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-24T02:49:14,878 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189; retrying up to 10 times
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,883 WARN [IPC Server handler 4 on default port 43341 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-24T02:49:14,884 WARN [IPC Server handler 4 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-24T02:49:14,884 WARN [IPC Server handler 4 on default port 43341 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-24T02:49:14,884 WARN [IPC Server handler 4 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-24T02:49:14,886 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189; retrying up to 10 times
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,893 WARN [IPC Server handler 2 on default port 43341 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-24T02:49:14,893 WARN [IPC Server handler 2 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-24T02:49:14,893 WARN [IPC Server handler 2 on default port 43341 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-24T02:49:14,894 WARN [IPC Server handler 2 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-24T02:49:14,895 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000003.1189; retrying up to 10 times
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000003.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,905 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:14,905 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-24T02:49:14,908 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1190222563_22 at /127.0.0.1:52390 [Receiving block BP-1181736044-172.17.0.2-1732416548023:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52390 dst: /127.0.0.1:39229
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-24T02:49:14,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39229 is added to blk_-9223372036854775760_1006 (size=1189)
2024-11-24T02:49:14,919 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-24T02:49:14,923 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 2024-11-24T02:49:14,925 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 2024-11-24T02:49:14,926 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/.tabledesc/.tableinfo.0000000003.1189 2024-11-24T02:49:14,945 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store 2024-11-24T02:49:14,965 WARN [IPC Server handler 1 on default port 43341 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 
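The rack WARN above is the placement-policy side of the same shortfall: BlockPlacementPolicyRackFaultTolerant tries to spread the expected targets evenly across racks, and with all of the mini-cluster's datanodes in a single default rack (and some nodes excluded after the failed writes) it can only fall back to uneven placement. A hedged sketch of provisioning a test DFS with enough datanodes and distinct racks for RS-3-2-1024k; MiniDFSCluster.Builder comes from the hadoop-hdfs test artifact, and the rack names here are illustrative, not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class EcFriendlyMiniDfs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // 5 datanodes cover the 3 data + 2 parity cells of RS-3-2-1024k; one rack
    // each, so rack-fault-tolerant placement never has to fall back.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(5)
        .racks(new String[] {"/r1", "/r2", "/r3", "/r4", "/r5"})
        .build();
    try {
      cluster.waitActive();
      System.out.println("live datanodes: " + cluster.getDataNodes().size());
    } finally {
      cluster.shutdown();
    }
  }
}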
2024-11-24T02:49:14,966 WARN [IPC Server handler 1 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:49:14,966 WARN [IPC Server handler 1 on default port 43341 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:49:14,966 WARN [IPC Server handler 1 on default port 43341 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:49:14,967 ERROR [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2539): Failed to become active master org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.regioninfo could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoFileContent(HRegionFileSystem.java:815) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:906) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:868) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.createRegionOnFileSystem(HRegionFileSystem.java:936) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.createHRegion(HRegion.java:7592) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:242) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:49:14,968 ERROR [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(3223): ***** ABORTING master 7c69a60bd8f6,41157,1732416552740: Unhandled exception. Starting shutdown. ***** org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.regioninfo could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoFileContent(HRegionFileSystem.java:815) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:906) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:868) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.createRegionOnFileSystem(HRegionFileSystem.java:936) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.createHRegion(HRegion.java:7592) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:242) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:49:14,969 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,41157,1732416552740' ***** 2024-11-24T02:49:14,969 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(3323): STOPPED: Stopped by master/7c69a60bd8f6:0:becomeActiveMaster 2024-11-24T02:49:14,969 INFO [M:0;7c69a60bd8f6:41157 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:49:14,970 INFO [M:0;7c69a60bd8f6:41157 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [] on shutdown 2024-11-24T02:49:14,970 INFO [M:0;7c69a60bd8f6:41157 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:49:14,970 DEBUG [M:0;7c69a60bd8f6:41157 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:49:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:15,020 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:15,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:15,020 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:15,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:15,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:15,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:15,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:15,083 DEBUG [M:0;7c69a60bd8f6:41157 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T02:49:15,083 DEBUG [M:0;7c69a60bd8f6:41157 {}] master.ActiveMasterManager(353): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T02:49:15,083 INFO [M:0;7c69a60bd8f6:41157 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41157 2024-11-24T02:49:15,086 INFO [M:0;7c69a60bd8f6:41157 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:49:15,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:15,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41157-0x1016abeb7b70000, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:15,236 INFO [M:0;7c69a60bd8f6:41157 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:49:17,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41067 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-24T02:49:17,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-24T02:49:17,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_-9223372036854775757_1006 (size=1189) 2024-11-24T02:49:17,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41067 is added to blk_-9223372036854775756_1006 (size=1189) 2024-11-24T02:49:41,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4; see HBASE-27595 for details.
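From here the log is the harness reacting to the aborted master: JVMClusterUtil.waitForEvent (its frame is visible in the Time-limited test stack inside the dump below) gives the master 30000 ms to become active, then prints the full thread dump that follows. A hedged sketch of raising that budget in a test setup; the configuration key below is the one current JVMClusterUtil sources read, but it does not appear in this log, so treat it as an assumption and verify it against your HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class SlowMasterStartSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key (see JVMClusterUtil.startup); the default of 30000 ms is
    // the timeout that triggered the thread dump below.
    conf.setInt("hbase.master.start.timeout.localHBaseCluster", 120_000);
    HBaseTestingUtil util = new HBaseTestingUtil(conf);
    util.startMiniCluster();   // would still abort if EC blocks cannot be placed
    util.shutdownMiniCluster();
  }
}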
Process Thread Dump: Thread dump because: Master not active after 30000ms 249 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.lang.ref.ReferenceQueue$Lock@541e0303 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 8 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82123e7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 376 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 5 Waiting on java.util.concurrent.CountDownLatch$Sync@7117d86d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) 
app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 91 Waited count: 409 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.JVMClusterUtil.waitForEvent(JVMClusterUtil.java:220) app//org.apache.hadoop.hbase.util.JVMClusterUtil.startup(JVMClusterUtil.java:177) app//org.apache.hadoop.hbase.LocalHBaseCluster.startup(LocalHBaseCluster.java:409) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.init(SingleProcessHBaseCluster.java:245) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.(SingleProcessHBaseCluster.java:111) app//org.apache.hadoop.hbase.HBaseTestingUtil.startMiniHBaseCluster(HBaseTestingUtil.java:863) app//org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:830) app//org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:784) app//org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.setUp(TestHBaseWalOnEC.java:96) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: 
WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@4a1533a6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@31a5b670 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 33 (Timer for 'NameNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@652b4855): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-5-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp108324141-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp108324141-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp108324141-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp108324141-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp108324141-41-acceptor-0@71c41e7b-ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:41519}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp108324141-42): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp108324141-43): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp108324141-44): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-3b657350-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 51 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40dd9ad6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43341): State: TIMED_WAITING Blocked count: 1 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@77bbc3e6): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2ef235f7): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 3425 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dee79e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43341): State: TIMED_WAITING Blocked count: 2 Waited count: 60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43341): State: TIMED_WAITING Blocked count: 18 Waited count: 71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43341): State: TIMED_WAITING Blocked count: 10 Waited count: 59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43341): State: TIMED_WAITING Blocked count: 32 Waited count: 60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43341): State: TIMED_WAITING Blocked count: 15 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-11-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@82055de): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5c0e44ab): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@53e5809b): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5947cc30): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2076990096)): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 76 (StorageLocationChecker thread 0): State: TIMED_WAITING Blocked count: 10 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 77 (StorageLocationChecker thread 1): State: TIMED_WAITING Blocked count: 17 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 78 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (pool-17-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp184261614-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp184261614-88-acceptor-0@165dfcc9-ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:45449}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp184261614-89): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp184261614-90): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-78216705-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@15c9c7f9): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38259): State: TIMED_WAITING Blocked count: 1 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d5c4d62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341): State: TIMED_WAITING Blocked count: 165 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-19-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5332c18f): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38259): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38259): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38259): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38259): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38259): State: TIMED_WAITING Blocked count: 0 Waited count: 43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 109 (StorageLocationChecker thread 0): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 110 (StorageLocationChecker thread 1): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (pool-25-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp329487238-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp329487238-120-acceptor-0@74af2dbf-ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:35951}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp329487238-121): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp329487238-122): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-4ff37113-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (498748605) connection to localhost/127.0.0.1:43341 from jenkins): State: TIMED_WAITING Blocked count: 32 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:43341): State: TIMED_WAITING Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5c4a21f6): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33873): State: TIMED_WAITING Blocked count: 1 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cfb9e96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341): State: TIMED_WAITING Blocked count: 126 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-28-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@ed3f478): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33873): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33873): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33873): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33873): State: TIMED_WAITING Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33873): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (StorageLocationChecker thread 0): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 144 (StorageLocationChecker thread 1): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 152 (pool-35-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp2147159807-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fa8c0443a50.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp2147159807-154-acceptor-0@38bcf19a-ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:38869}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp2147159807-155): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp2147159807-156): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-30aa85c2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1220c2ee): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40061): State: TIMED_WAITING Blocked count: 1 Waited count: 5 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e6668a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341): State: TIMED_WAITING Blocked count: 29 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-37-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2119d5d0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40061): State: TIMED_WAITING Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40061): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40061): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 173 (IPC Server handler 3 on default port 40061):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 174 (IPC Server handler 4 on default port 40061):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data3)):
  State: TIMED_WAITING
  Blocked count: 13
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data1)):
  State: TIMED_WAITING
  Blocked count: 30
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data4)):
  State: TIMED_WAITING
  Blocked count: 29
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data2)):
  State: TIMED_WAITING
  Blocked count: 20
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 193 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data1/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data3/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data2/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data4/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 207 (ForkJoinPool-2-worker-1):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 208 (ForkJoinPool-2-worker-2):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 209 (ForkJoinPool-2-worker-3):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 210 (ForkJoinPool-2-worker-4):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 211 (ForkJoinPool-2-worker-5):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 212 (ForkJoinPool-2-worker-6):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 213 (ForkJoinPool-2-worker-7):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 214 (ForkJoinPool-2-worker-8):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.ForkJoinPool@5125510d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 215 (DataNode DiskChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 216 (DataNode DiskChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 217 (pool-22-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 218 (pool-14-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7d562c88[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 224 (java.util.concurrent.ThreadPoolExecutor$Worker@5c283490[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 226 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data6)):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data6/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 232 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data5/current/BP-1181736044-172.17.0.2-1732416548023):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 235 (DataNode DiskChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 236 (pool-32-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 239 (java.util.concurrent.ThreadPoolExecutor$Worker@b1208d3[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 240 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576)
Thread 246 (LeaseRenewer:jenkins@localhost:43341):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 253 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 254 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 255 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60751):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 252 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 256 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 17
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 257 (SyncThread:0):
  State: WAITING
  Blocked count: 1
  Waited count: 58
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60114386
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 258 (ProcessThread(sid:0 cport:60751):):
  State: WAITING
  Blocked count: 0
  Waited count: 69
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e9c5282
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 259 (RequestThrottler):
  State: WAITING
  Blocked count: 0
  Waited count: 73
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27b908bc
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 260 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 1
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 276 (weak-ref-cleaner-strictcontextstorage):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.lang.ref.ReferenceQueue$Lock@7c930c39
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 277 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 278 (HMaster-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 281 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 1
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 282 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 283 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 1
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 285 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 2
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 287 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 3
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 288 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 289 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 290 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 291 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 292 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 293 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 294 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 295 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 2
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 296 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16f47012
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 310 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f14512f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 313 (Time-limited test-SendThread(127.0.0.1:60751)):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 314 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 2
  Waited count: 8
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ad5c43f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 315 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 9
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ec3e598
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 316 (Time-limited test.LruBlockCache.EvictionThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957)
Thread 318 (LruBlockCacheStatsExecutor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 320 (MobFileCache #0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 321 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44195):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@7c2b8794
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 322 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44195):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@514ab7c8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 323 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@5497d260
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 324 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44195):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43510bf9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 325 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e63320 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 326 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e63320 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 327 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@67162c63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 328 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d78e633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 329 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46bd3aa6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 330 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=44195): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@14107e62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (pool-71-thread-1-SendThread(127.0.0.1:60751)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 336 (pool-71-thread-1-EventThread): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42332db7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 337 (zk-event-processor-pool-0): State: WAITING Blocked count: 10 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@83002bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 338 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 340 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 342 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 343 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@632bfff8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 344 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7fc84d4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 345 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@50234e8f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 346 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ad1f948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 347 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@363903d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 348 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@363903d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 349 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@b26aa45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 350 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@41bbca98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 351 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44a5705e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 352 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=43237): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3afce4b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 355 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Time-limited test-SendThread(127.0.0.1:60751)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 358 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b6052c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 359 (zk-event-processor-pool-0): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1b46f028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 362 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@31ca5cbb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 366 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting 
on java.util.concurrent.Semaphore$NonfairSync@376654a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 367 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48acd894 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 368 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c5db486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 369 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f910432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 370 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f910432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 371 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ad7d47c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 372 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1411d3ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 373 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@25d9ac4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 374 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12de8c57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 378 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master/7c69a60bd8f6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master/7c69a60bd8f6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 384 (StripedRead-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (org.apache.hadoop.hdfs.PeerCache@7d846b85): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (StripedBlockReconstruction-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (stripedRead-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 293 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 446 (StripedBlockReconstruction-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (StripedBlockReconstruction-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 452 (stripedRead-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:44,137 ERROR [Time-limited test {}] hbase.SingleProcessHBaseCluster(250): Error starting cluster java.lang.RuntimeException: Master not active after 30000ms at org.apache.hadoop.hbase.util.JVMClusterUtil.waitForEvent(JVMClusterUtil.java:221) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.JVMClusterUtil.startup(JVMClusterUtil.java:177) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.LocalHBaseCluster.startup(LocalHBaseCluster.java:409) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster.init(SingleProcessHBaseCluster.java:245) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster.(SingleProcessHBaseCluster.java:111) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniHBaseCluster(HBaseTestingUtil.java:863) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:830) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:784) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.setUp(TestHBaseWalOnEC.java:96) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.run(ParentRunner.java:413) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.Suite.runChild(Suite.java:128) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.Suite.runChild(Suite.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
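The exception above is the heart of this failure: HBaseTestingUtil.startMiniCluster builds a LocalHBaseCluster, and JVMClusterUtil.startup gives up when the master has not become active within 30,000 ms; the thread dump before it is printed so the hang can be diagnosed. Below is a minimal sketch of the setup path this trace exercises. It is not TestHBaseWalOnEC itself, and the property name "hbase.master.start.timeout.localHBaseCluster" is an assumption drawn from current HBase sources rather than from this log.

// Hedged sketch of the mini-cluster start path in the stack trace above; assumes
// hbase-testing-util on the classpath. The timeout property name is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Give the master longer than the default 30 s to report active on a loaded CI host
    // (assumed key, read by JVMClusterUtil while it waits for an active master).
    conf.setLong("hbase.master.start.timeout.localHBaseCluster", 120_000L);
    // Mirrors the StartMiniClusterOption printed further down in this log:
    // 1 master, 3 region servers, 3 data nodes.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    util.startMiniCluster(option); // same call chain as HBaseTestingUtil.startMiniCluster above
    try {
      // ... test body (WAL on erasure-coded storage) would run here ...
    } finally {
      util.shutdownMiniCluster(); // produces the "Shutting down minicluster" entries below
    }
  }
}

Raising the timeout only buys a slow master more time; it does not address whatever kept the master from becoming active, which is what the thread dump above is meant to reveal.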
2024-11-24T02:49:44,138 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-24T02:49:44,138 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,44195,1732416553823' *****
2024-11-24T02:49:44,138 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-24T02:49:44,138 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,43237,1732416553954' *****
2024-11-24T02:49:44,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-24T02:49:44,139 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,41127,1732416554021' *****
2024-11-24T02:49:44,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-24T02:49:44,139 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-24T02:49:44,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-24T02:49:44,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:44,204 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:44,204 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:44,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:44,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:44,210 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T02:49:44,210 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T02:49:44,210 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1181736044-172.17.0.2-1732416548023 (Datanode Uuid 52734fc8-d62f-4a2b-8532-31f88e73bf9c) service to localhost/127.0.0.1:43341
2024-11-24T02:49:44,210 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T02:49:44,212 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data5/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,212 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data6/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T02:49:44,215 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:44,216 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:44,216 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:44,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:44,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:44,218 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T02:49:44,218 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T02:49:44,218 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1181736044-172.17.0.2-1732416548023 (Datanode Uuid 88ce32c5-a72d-484f-a1b8-a80923091096) service to localhost/127.0.0.1:43341
2024-11-24T02:49:44,218 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T02:49:44,219 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data3/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,219 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data4/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,219 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T02:49:44,224 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:44,224 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:44,224 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:44,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:44,225 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:44,226 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T02:49:44,226 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T02:49:44,227 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T02:49:44,227 WARN [BP-1181736044-172.17.0.2-1732416548023 heartbeating to localhost/127.0.0.1:43341 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1181736044-172.17.0.2-1732416548023 (Datanode Uuid 7b7cce00-44f4-4dad-8b82-47c20140b8b3) service to localhost/127.0.0.1:43341
2024-11-24T02:49:44,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data1/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/cluster_005b2254-078b-8f09-7365-d38ea994b44a/data/data2/current/BP-1181736044-172.17.0.2-1732416548023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:44,228 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T02:49:44,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T02:49:44,239 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:44,239 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:44,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:44,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:44,248 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-24T02:49:44,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-24T02:49:44,283 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=110 (was 161), OpenFileDescriptor=374 (was 391), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=366 (was 483), ProcessCount=11 (was 11), AvailableMemoryMB=10860 (was 11854)
2024-11-24T02:49:44,289 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=110, OpenFileDescriptor=374, MaxFileDescriptor=1048576, SystemLoadAverage=366, ProcessCount=11, AvailableMemoryMB=10859
2024-11-24T02:49:44,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-24T02:49:44,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.log.dir so I do NOT create it in target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049
2024-11-24T02:49:44,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ffbd484f-b731-e1f6-1e1c-28d5ac9f75b5/hadoop.tmp.dir so I do NOT create it in target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049
2024-11-24T02:49:44,290 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839, deleteOnExit=true
2024-11-24T02:49:44,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/test.cache.data in system properties and HBase conf
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.tmp.dir in system properties and HBase conf
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir in system properties and HBase conf
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-24T02:49:44,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-24T02:49:44,292 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-11-24T02:49:44,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:49:44,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:49:44,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:49:44,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:49:44,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:49:44,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:49:44,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:49:44,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:49:44,345 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,346 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:44195-0x1016abeb7b70001, quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:43237-0x1016abeb7b70002, quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:44195-0x1016abeb7b70001, 
quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:41127-0x1016abeb7b70003, quorum=127.0.0.1:60751, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring 2024-11-24T02:49:44,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:49:44,811 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:49:44,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:49:44,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:49:44,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:49:44,813 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:49:44,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fca1d0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:49:44,814 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4141bee1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:49:44,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34f8b36d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/java.io.tmpdir/jetty-localhost-40463-hadoop-hdfs-3_4_1-tests_jar-_-any-16252039330681166409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:49:44,910 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5136e662{HTTP/1.1, (http/1.1)}{localhost:40463} 2024-11-24T02:49:44,910 INFO [Time-limited test {}] server.Server(415): Started @38695ms 2024-11-24T02:49:45,195 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:49:45,199 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:49:45,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:49:45,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:49:45,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:49:45,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a7e23a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:49:45,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23e049cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:49:45,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4095cdc1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/java.io.tmpdir/jetty-localhost-41437-hadoop-hdfs-3_4_1-tests_jar-_-any-17112488972727520708/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:49:45,295 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63241e03{HTTP/1.1, (http/1.1)}{localhost:41437} 2024-11-24T02:49:45,295 INFO [Time-limited test {}] server.Server(415): Started @39080ms 2024-11-24T02:49:45,297 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:49:45,330 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:49:45,334 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:49:45,336 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:49:45,336 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:49:45,336 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:49:45,336 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15be761f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:49:45,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@895c7e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:49:45,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b12351d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/java.io.tmpdir/jetty-localhost-43911-hadoop-hdfs-3_4_1-tests_jar-_-any-10370399037126217788/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:49:45,430 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1841cfc0{HTTP/1.1, (http/1.1)}{localhost:43911} 2024-11-24T02:49:45,430 INFO [Time-limited test {}] server.Server(415): Started @39215ms 2024-11-24T02:49:45,431 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:49:45,460 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:49:45,465 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:49:45,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:49:45,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:49:45,467 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:49:45,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7aaeb6cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:49:45,468 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f04037c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:49:45,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66690135{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/java.io.tmpdir/jetty-localhost-46537-hadoop-hdfs-3_4_1-tests_jar-_-any-15746543705142217497/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:49:45,576 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55fefad1{HTTP/1.1, (http/1.1)}{localhost:46537} 2024-11-24T02:49:45,576 INFO [Time-limited test {}] server.Server(415): Started @39361ms 2024-11-24T02:49:45,577 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:49:46,682 WARN [Thread-347 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data1/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:46,691 WARN [Thread-348 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data2/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:46,715 WARN [Thread-287 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:49:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4112d74d9562d0b with lease ID 0x900090e66ddc8d9c: Processing first storage report for DS-3400123d-3b36-449b-b9b4-275b1f1364c1 from datanode DatanodeRegistration(127.0.0.1:39123, datanodeUuid=c14c80c1-6001-4dd0-a345-cb9c1d96fcd2, infoPort=41109, infoSecurePort=0, ipcPort=44301, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4112d74d9562d0b with lease ID 0x900090e66ddc8d9c: from storage DS-3400123d-3b36-449b-b9b4-275b1f1364c1 node DatanodeRegistration(127.0.0.1:39123, datanodeUuid=c14c80c1-6001-4dd0-a345-cb9c1d96fcd2, infoPort=41109, infoSecurePort=0, ipcPort=44301, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:49:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4112d74d9562d0b with lease ID 0x900090e66ddc8d9c: Processing first storage report for DS-a41edd3c-2fbf-4f78-916b-b78a4dc30d18 from datanode DatanodeRegistration(127.0.0.1:39123, datanodeUuid=c14c80c1-6001-4dd0-a345-cb9c1d96fcd2, infoPort=41109, infoSecurePort=0, ipcPort=44301, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4112d74d9562d0b with lease ID 0x900090e66ddc8d9c: from storage DS-a41edd3c-2fbf-4f78-916b-b78a4dc30d18 node DatanodeRegistration(127.0.0.1:39123, datanodeUuid=c14c80c1-6001-4dd0-a345-cb9c1d96fcd2, infoPort=41109, infoSecurePort=0, ipcPort=44301, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:49:46,992 WARN [Thread-358 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data3/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:46,992 WARN [Thread-359 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data4/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:47,014 WARN [Thread-310 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:49:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb861dcdbdbf66da4 with lease ID 0x900090e66ddc8d9d: Processing first storage report for DS-5e487e97-10a0-416d-8989-c511bf3d04d4 from datanode DatanodeRegistration(127.0.0.1:35293, datanodeUuid=b0b35602-1ef4-4d5d-8ddb-26cfc4202eb0, infoPort=41015, infoSecurePort=0, ipcPort=33539, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb861dcdbdbf66da4 with lease ID 0x900090e66ddc8d9d: from storage DS-5e487e97-10a0-416d-8989-c511bf3d04d4 node DatanodeRegistration(127.0.0.1:35293, datanodeUuid=b0b35602-1ef4-4d5d-8ddb-26cfc4202eb0, infoPort=41015, infoSecurePort=0, ipcPort=33539, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:49:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb861dcdbdbf66da4 with lease ID 0x900090e66ddc8d9d: Processing first storage report for DS-1e62c265-021a-423d-83de-276a8193233e from datanode DatanodeRegistration(127.0.0.1:35293, datanodeUuid=b0b35602-1ef4-4d5d-8ddb-26cfc4202eb0, infoPort=41015, infoSecurePort=0, ipcPort=33539, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb861dcdbdbf66da4 with lease ID 0x900090e66ddc8d9d: from storage DS-1e62c265-021a-423d-83de-276a8193233e node DatanodeRegistration(127.0.0.1:35293, datanodeUuid=b0b35602-1ef4-4d5d-8ddb-26cfc4202eb0, infoPort=41015, infoSecurePort=0, ipcPort=33539, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:49:47,032 WARN [Thread-369 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data5/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:47,032 WARN [Thread-370 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data6/current/BP-1178940285-172.17.0.2-1732416584320/current, will proceed with Du for space computation calculation, 2024-11-24T02:49:47,054 WARN [Thread-332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:49:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44502dd3f084143d with lease ID 0x900090e66ddc8d9e: Processing first storage report for DS-fde1d9ed-8203-4a3a-a017-f0a4c90df74b from datanode DatanodeRegistration(127.0.0.1:40851, datanodeUuid=b0ffa974-e89e-47e0-bf1a-46bf3b9cf3bd, infoPort=35071, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44502dd3f084143d with lease ID 0x900090e66ddc8d9e: from storage DS-fde1d9ed-8203-4a3a-a017-f0a4c90df74b node DatanodeRegistration(127.0.0.1:40851, datanodeUuid=b0ffa974-e89e-47e0-bf1a-46bf3b9cf3bd, infoPort=35071, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:49:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x44502dd3f084143d with lease ID 0x900090e66ddc8d9e: Processing first storage report for DS-13b87673-8e87-4567-816a-4475c784e49a from datanode DatanodeRegistration(127.0.0.1:40851, datanodeUuid=b0ffa974-e89e-47e0-bf1a-46bf3b9cf3bd, infoPort=35071, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320) 2024-11-24T02:49:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44502dd3f084143d with lease ID 0x900090e66ddc8d9e: from storage DS-13b87673-8e87-4567-816a-4475c784e49a node DatanodeRegistration(127.0.0.1:40851, datanodeUuid=b0ffa974-e89e-47e0-bf1a-46bf3b9cf3bd, infoPort=35071, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=1372089923;c=1732416584320), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:49:47,129 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049 2024-11-24T02:49:47,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/zookeeper_0, clientPort=60680, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:49:47,140 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60680 2024-11-24T02:49:47,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,143 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:49:47,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:49:47,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:49:47,164 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d with version=8 2024-11-24T02:49:47,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43341/user/jenkins/test-data/2f50fdcd-1c9b-5edb-ddb3-4d602a2ea19d/hbase-staging 2024-11-24T02:49:47,167 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:49:47,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:49:47,168 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34451 2024-11-24T02:49:47,170 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34451 connecting to ZooKeeper ensemble=127.0.0.1:60680 2024-11-24T02:49:47,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344510x0, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:49:47,248 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34451-0x1016abf41270000 connected 2024-11-24T02:49:47,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,342 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:47,342 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d, hbase.cluster.distributed=false 2024-11-24T02:49:47,344 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:49:47,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34451 2024-11-24T02:49:47,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34451 2024-11-24T02:49:47,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34451 2024-11-24T02:49:47,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34451 2024-11-24T02:49:47,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34451 2024-11-24T02:49:47,363 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:49:47,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,363 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:49:47,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:49:47,364 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:49:47,364 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:49:47,377 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46185 2024-11-24T02:49:47,379 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46185 connecting to ZooKeeper ensemble=127.0.0.1:60680 2024-11-24T02:49:47,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461850x0, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:49:47,395 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:461850x0, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:47,395 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46185-0x1016abf41270001 connected 2024-11-24T02:49:47,395 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:49:47,396 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:49:47,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:47,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:49:47,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46185 2024-11-24T02:49:47,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46185 2024-11-24T02:49:47,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46185 2024-11-24T02:49:47,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46185 2024-11-24T02:49:47,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46185 2024-11-24T02:49:47,422 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:49:47,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,422 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:49:47,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,423 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:49:47,423 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:49:47,423 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:49:47,424 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36867 2024-11-24T02:49:47,425 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36867 connecting to ZooKeeper ensemble=127.0.0.1:60680 2024-11-24T02:49:47,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368670x0, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:49:47,437 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36867-0x1016abf41270002 connected 2024-11-24T02:49:47,437 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:47,437 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:49:47,438 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:49:47,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:47,440 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:49:47,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36867 2024-11-24T02:49:47,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36867 2024-11-24T02:49:47,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36867 2024-11-24T02:49:47,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36867 2024-11-24T02:49:47,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36867 2024-11-24T02:49:47,480 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:49:47,481 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:49:47,482 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:49:47,482 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34975 2024-11-24T02:49:47,484 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34975 connecting to ZooKeeper ensemble=127.0.0.1:60680 2024-11-24T02:49:47,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:49:47,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349750x0, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:49:47,500 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34975-0x1016abf41270003 connected 2024-11-24T02:49:47,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349750x0, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:47,501 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:49:47,502 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:49:47,503 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:49:47,505 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:49:47,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34975 2024-11-24T02:49:47,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34975 2024-11-24T02:49:47,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34975 2024-11-24T02:49:47,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34975 2024-11-24T02:49:47,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34975 2024-11-24T02:49:47,522 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:34451 2024-11-24T02:49:47,522 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:47,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,533 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, 
quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:47,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:47,544 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:49:47,545 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,34451,1732416587166 from backup master directory 2024-11-24T02:49:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:49:47,552 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
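The ZKUtil(113) "Set watcher on znode that does not yet exist" lines and the NodeCreated/NodeDeleted events fanned out above to master:34451 and the three regionserver sessions follow ZooKeeper's standard one-shot existence-watch pattern: calling exists() on a missing znode registers the watch, and the later create fires it once on every watching session. A minimal sketch against the plain org.apache.zookeeper client API rather than HBase's ZKWatcher wrapper; the quorum address is copied from the log and the session timeout is an arbitrary choice:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60680", 30000, event -> {});
            Watcher masterWatch = (WatchedEvent event) -> {
                // Corresponds to "Received ZooKeeper Event, type=NodeCreated,
                // path=/hbase/master" above. Watches are one-shot, so a real
                // client re-registers here before acting on the event.
                if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                    System.out.println("master znode created: " + event.getPath());
                }
            };
            // exists() both checks the znode and registers the watch, even
            // when the znode is not there yet.
            if (zk.exists("/hbase/master", masterWatch) == null) {
                System.out.println("Set watcher on znode that does not yet exist");
            }
        }
    }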
2024-11-24T02:49:47,552 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,34451,1732416587166
2024-11-24T02:49:47,560 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/hbase.id] with ID: d3225b81-0a8f-4423-b84f-eade4e04101e
2024-11-24T02:49:47,561 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/.tmp/hbase.id
2024-11-24T02:49:47,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741826_1002 (size=42)
2024-11-24T02:49:47,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741826_1002 (size=42)
2024-11-24T02:49:47,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741826_1002 (size=42)
2024-11-24T02:49:47,572 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/.tmp/hbase.id]:[hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/hbase.id]
2024-11-24T02:49:47,598 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:49:47,598 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-24T02:49:47,600 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
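The FSUtils records above show the cluster ID being published with a write-to-temp-then-rename pattern: the ID is written under .tmp/ and then moved into place, so a reader never observes a half-written hbase.id. A minimal sketch of the same pattern with the plain Hadoop FileSystem API (paths and the ID value mirror the log; this is not HBase's FSUtils code itself):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:35433"); // as in the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path target = new Path("/user/jenkins/test-data/hbase.id");

        // 1. Write the ID somewhere readers never look.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("d3225b81-0a8f-4423-b84f-eade4e04101e"
                .getBytes(StandardCharsets.UTF_8));
        }
        // 2. rename is a single namenode metadata operation, so the target
        //    either does not exist yet or contains the complete file.
        if (!fs.rename(tmp, target)) {
            throw new IOException("rename failed: " + tmp + " -> " + target);
        }
    }
}
```

The three addStoredBlock records in between are the HDFS side of step 1: each of the three datanodes reports the 42-byte block backing the temporary file.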
2024-11-24T02:49:47,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:47,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:47,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:47,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741827_1003 (size=196)
2024-11-24T02:49:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741827_1003 (size=196)
2024-11-24T02:49:47,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741827_1003 (size=196)
2024-11-24T02:49:47,632 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-24T02:49:47,633 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-24T02:49:47,633 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-24T02:49:47,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741828_1004 (size=1189)
2024-11-24T02:49:47,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741828_1004 (size=1189)
2024-11-24T02:49:47,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741828_1004 (size=1189)
2024-11-24T02:49:47,649 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store
2024-11-24T02:49:47,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741829_1005 (size=34)
2024-11-24T02:49:47,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741829_1005 (size=34)
2024-11-24T02:49:47,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741829_1005 (size=34)
2024-11-24T02:49:47,666 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
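The StoreHotnessProtector record spells out its own enable switch. If you wanted the protector on in a test like this, you would set the named property in the cluster configuration before startup; a hedged sketch (the limit value 10 is an arbitrary illustration, not a recommendation from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableHotnessProtector {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 0, the value in effect here, disables the protector; any value > 0
        // caps concurrent puts per store, per the warning record above.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
    }
}
```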
2024-11-24T02:49:47,668 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-24T02:49:47,670 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-24T02:49:47,670 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T02:49:47,670 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T02:49:47,670 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-24T02:49:47,670 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T02:49:47,671 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-24T02:49:47,671 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416587669Disabling compacts and flushes for region at 1732416587670 (+1 ms)Disabling writes for close at 1732416587670Writing region close event to WAL at 1732416587671 (+1 ms)Closed at 1732416587671
2024-11-24T02:49:47,673 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/.initializing
2024-11-24T02:49:47,673 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/WALs/7c69a60bd8f6,34451,1732416587166
2024-11-24T02:49:47,696 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34451%2C1732416587166, suffix=, logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/WALs/7c69a60bd8f6,34451,1732416587166, archiveDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/oldWALs, maxLogs=10
2024-11-24T02:49:47,704 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34451%2C1732416587166.1732416587700
2024-11-24T02:49:47,723 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/WALs/7c69a60bd8f6,34451,1732416587166/7c69a60bd8f6%2C34451%2C1732416587166.1732416587700
2024-11-24T02:49:47,731 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41015:41015),(127.0.0.1/127.0.0.1:41109:41109),(127.0.0.1/127.0.0.1:35071:35071)]
2024-11-24T02:49:47,732 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-24T02:49:47,732 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-24T02:49:47,736 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,737 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-24T02:49:47,793 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T02:49:47,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-24T02:49:47,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-24T02:49:47,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T02:49:47,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-24T02:49:47,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-24T02:49:47,802 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T02:49:47,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-24T02:49:47,804 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
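The repeated CompactionConfiguration records describe ratio-based minor-compaction selection: with ratio 1.200000, a file is admitted to a candidate set only if it is no larger than 1.2x the combined size of the other files, and between minFilesToCompact=3 and maxFilesToCompact=10 files are taken. A simplified, self-contained sketch of that admission test (a toy version of the idea, not HBase's ExploringCompactionPolicy code):

```java
import java.util.List;

public class RatioCheck {
    /** True if every file is <= ratio * (sum of the other files). */
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false; // one oversized file disqualifies the set
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three similar files compact together; one dominant file does not,
        // which keeps already-compacted large files out of minor compactions.
        System.out.println(withinRatio(List.of(10L, 12L, 11L), 1.2));  // true
        System.out.println(withinRatio(List.of(100L, 12L, 11L), 1.2)); // false
    }
}
```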
2024-11-24T02:49:47,806 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T02:49:47,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-24T02:49:47,808 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,810 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,811 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,819 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
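The FlushLargeStoresPolicy record documents its fallback arithmetic: with no per-family lower bound configured, it divides the region's memstore flush size by the number of column families. Here that is 134217728 B (the flushSize injected earlier) over the 4 families of master:store (info, proc, rs, state), giving the 32.0 M in the message. The same arithmetic as a sketch:

```java
public class FlushLowerBound {
    public static void main(String[] args) {
        long memStoreFlushSize = 134_217_728L; // flushSize from the log (128 MB)
        int families = 4;                      // info, proc, rs, state
        long lowerBound = memStoreFlushSize / families;
        // Prints 33554432 (32 MB), matching flushSizeLowerBound=33554432
        // in the "Opened" record that follows.
        System.out.println(lowerBound);
    }
}
```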
2024-11-24T02:49:47,821 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-24T02:49:47,825 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-24T02:49:47,826 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63600279, jitterRate=-0.05228199064731598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-24T02:49:47,831 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416587747Initializing all the Stores at 1732416587749 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416587749Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416587750 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416587750Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416587750Cleaning up temporary data from old regions at 1732416587816 (+66 ms)Region opened successfully at 1732416587831 (+15 ms)
2024-11-24T02:49:47,832 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-24T02:49:47,860 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23ed17d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0
2024-11-24T02:49:47,886 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
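The "Opened ... desiredMaxFileSize=63600279, jitterRate=-0.05228..." record shows the split threshold after jitter is applied. The numbers are consistent with desiredMaxFileSize = maxFileSize + (long)(maxFileSize * jitterRate) for a 64 MB base: 67108864 - 3508585 = 63600279 here, and 67108864 + 8016823 = 75125687 for the meta region later in this log. A sketch of that computation; the 64 MB base is inferred from the arithmetic, not stated in the log:

```java
public class JitteredSplitSize {
    public static void main(String[] args) {
        long maxFileSize = 64L * 1024 * 1024;     // assumed hbase.hregion.max.filesize
        double jitterRate = -0.05228199064731598; // realized jitter from the record
        long desired = maxFileSize + (long) (maxFileSize * jitterRate);
        // Prints 63600279 (up to floating-point rounding), the value logged.
        // Randomized jitter keeps co-created regions from all splitting at once.
        System.out.println(desired);
    }
}
```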
2024-11-24T02:49:47,896 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-24T02:49:47,896 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-24T02:49:47,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-24T02:49:47,900 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-24T02:49:47,904 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec
2024-11-24T02:49:47,904 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-24T02:49:47,925 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-24T02:49:47,932 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-24T02:49:47,941 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-24T02:49:47,944 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-24T02:49:47,945 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-24T02:49:48,026 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-24T02:49:48,028 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-24T02:49:48,031 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-24T02:49:48,036 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-24T02:49:48,037 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-24T02:49:48,047 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-24T02:49:48,063 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-24T02:49:48,068 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,082 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,34451,1732416587166, sessionid=0x1016abf41270000, setting cluster-up flag (Was=false)
2024-11-24T02:49:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
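The RecoverableZooKeeper "already deleted, retry=false" records reflect delete-as-idempotent handling: a NoNodeException on delete is treated as success, since the desired end state (node gone) already holds, and the message notes this can also happen when a timed-out request actually succeeded on the server. A minimal sketch of that handling with the plain ZooKeeper client:

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class IdempotentDelete {
    /** Delete a znode, treating "already gone" as success. */
    static void deleteIfExists(ZooKeeper zk, String path)
            throws KeeperException, InterruptedException {
        try {
            zk.delete(path, -1); // version -1 = match any version
        } catch (KeeperException.NoNodeException e) {
            // Node already deleted, possibly by an earlier retried request
            // that did succeed; not an error, so no further retry is needed.
        }
    }
}
```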
2024-11-24T02:49:48,142 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-24T02:49:48,144 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,34451,1732416587166
2024-11-24T02:49:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-24T02:49:48,194 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-24T02:49:48,198 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,34451,1732416587166
2024-11-24T02:49:48,208 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-24T02:49:48,212 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(746): ClusterId : d3225b81-0a8f-4423-b84f-eade4e04101e
2024-11-24T02:49:48,212 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(746): ClusterId : d3225b81-0a8f-4423-b84f-eade4e04101e
2024-11-24T02:49:48,212 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(746): ClusterId : d3225b81-0a8f-4423-b84f-eade4e04101e
2024-11-24T02:49:48,214 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-24T02:49:48,214 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-24T02:49:48,214 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-24T02:49:48,238 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-24T02:49:48,238 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-24T02:49:48,238 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-24T02:49:48,238 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-24T02:49:48,238 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-24T02:49:48,238 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-24T02:49:48,248 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-24T02:49:48,248 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-24T02:49:48,248 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-24T02:49:48,249 DEBUG [RS:1;7c69a60bd8f6:36867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1929194d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0
2024-11-24T02:49:48,249 DEBUG [RS:2;7c69a60bd8f6:34975 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53cb2e65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0
2024-11-24T02:49:48,249 DEBUG [RS:0;7c69a60bd8f6:46185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a28345b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0
2024-11-24T02:49:48,261 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;7c69a60bd8f6:34975
2024-11-24T02:49:48,261 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7c69a60bd8f6:36867
2024-11-24T02:49:48,262 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:46185
2024-11-24T02:49:48,264 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-24T02:49:48,264 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-24T02:49:48,264 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-24T02:49:48,264 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-24T02:49:48,264 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-24T02:49:48,264 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-24T02:49:48,264 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-24T02:49:48,264 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-24T02:49:48,264 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-24T02:49:48,266 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,34451,1732416587166 with port=46185, startcode=1732416587362
2024-11-24T02:49:48,266 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,34451,1732416587166 with port=34975, startcode=1732416587480
2024-11-24T02:49:48,266 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,34451,1732416587166 with port=36867, startcode=1732416587422
2024-11-24T02:49:48,277 DEBUG [RS:1;7c69a60bd8f6:36867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-24T02:49:48,277 DEBUG [RS:2;7c69a60bd8f6:34975 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-24T02:49:48,277 DEBUG [RS:0;7c69a60bd8f6:46185 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-24T02:49:48,280 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-24T02:49:48,289 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-24T02:49:48,295 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
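The StochasticLoadBalancer record lists its cost functions and notes "sum of multiplier of cost functions = 0.0", meaning this test has effectively zeroed the weights, leaving the balancer inert. Conceptually, the balancer scores a candidate cluster layout as a multiplier-weighted combination of the individual cost-function outputs; a simplified, illustrative sketch of that combination (not the balancer's real scoring code):

```java
public class WeightedCost {
    /** Weighted average of cost-function outputs; disabled functions are skipped. */
    static double totalCost(double[] multipliers, double[] costs) {
        double sum = 0.0, weight = 0.0;
        for (int i = 0; i < costs.length; i++) {
            if (multipliers[i] <= 0) continue; // multiplier 0 disables a function
            sum += multipliers[i] * costs[i];
            weight += multipliers[i];
        }
        return weight == 0 ? 0.0 : sum / weight; // 0.0 when everything is disabled
    }

    public static void main(String[] args) {
        // One active function (weight 500, cost 0.2), one disabled: overall 0.2.
        System.out.println(totalCost(new double[] {500, 0}, new double[] {0.2, 0.9}));
    }
}
```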
2024-11-24T02:49:48,301 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,34451,1732416587166 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1
2024-11-24T02:49:48,308 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2
2024-11-24T02:49:48,309 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1
2024-11-24T02:49:48,315 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-24T02:49:48,316 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-24T02:49:48,318 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416618318
2024-11-24T02:49:48,320 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-24T02:49:48,320 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-24T02:49:48,323 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-24T02:49:48,323 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-24T02:49:48,324 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-24T02:49:48,324 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-24T02:49:48,324 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-24T02:49:48,324 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-24T02:49:48,325 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
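Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" record above is a fixed-size pool dedicated to one kind of master event. With core equal to max, the closest plain java.util.concurrent equivalent is a fixed thread pool with a naming thread factory; a sketch (names and sizes mirror the records, the task body is a stand-in):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedFixedPool {
    static ExecutorService newPool(String name, int size) {
        AtomicInteger n = new AtomicInteger();
        // Name threads after the pool so they are identifiable in logs/dumps.
        ThreadFactory tf = r -> new Thread(r, name + "-" + n.incrementAndGet());
        return Executors.newFixedThreadPool(size, tf);
    }

    public static void main(String[] args) {
        ExecutorService openRegion = newPool("MASTER_OPEN_REGION", 5);
        openRegion.submit(() -> System.out.println(Thread.currentThread().getName()));
        openRegion.shutdown();
    }
}
```

Separate per-event-type pools keep slow operations (for example snapshot work, pool size 1) from starving latency-sensitive ones such as region opens.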
2024-11-24T02:49:48,332 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-24T02:49:48,333 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-24T02:49:48,334 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-24T02:49:48,336 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-24T02:49:48,337 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-24T02:49:48,339 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416588338,5,FailOnTimeoutGroup]
2024-11-24T02:49:48,340 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416588340,5,FailOnTimeoutGroup]
2024-11-24T02:49:48,340 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-24T02:49:48,341 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-24T02:49:48,342 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-24T02:49:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741831_1007 (size=1321)
2024-11-24T02:49:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741831_1007 (size=1321)
2024-11-24T02:49:48,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741831_1007 (size=1321)
2024-11-24T02:49:48,344 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
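A ScheduledChore such as LogsCleaner (period=600000, unit=MILLISECONDS) is simply periodic work on a shared scheduler. A minimal stand-alone equivalent with ScheduledExecutorService, where the chore name and period mirror the record and the task body is a placeholder:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogsCleanerChore {
    public static void main(String[] args) {
        ScheduledExecutorService chores =
            Executors.newSingleThreadScheduledExecutor(
                r -> new Thread(r, "LogsCleaner"));
        // Run immediately, then every 600000 ms, as in the ChoreService record.
        // Note the scheduler thread is non-daemon, so this program keeps running.
        chores.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs for deletable files..."),
            0, 600_000, TimeUnit.MILLISECONDS);
    }
}
```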
2024-11-24T02:49:48,344 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:49:48,345 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d 2024-11-24T02:49:48,346 INFO [HMaster-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:49:48,346 INFO [HMaster-EventLoopGroup-6-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36465, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:49:48,346 INFO [HMaster-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58825, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:49:48,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:49:48,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:49:48,360 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:49:48,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:49:48,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:49:48,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:49:48,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:48,367 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,367 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:48,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:49:48,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:49:48,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:48,370 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,371 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34451 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,371 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d 2024-11-24T02:49:48,371 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d 2024-11-24T02:49:48,371 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35433 2024-11-24T02:49:48,371 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35433 2024-11-24T02:49:48,371 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:49:48,371 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:49:48,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:48,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:49:48,374 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d 2024-11-24T02:49:48,374 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35433 2024-11-24T02:49:48,374 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:49:48,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:49:48,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:48,378 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:48,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:49:48,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:49:48,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:48,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:48,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:49:48,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740 2024-11-24T02:49:48,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740 2024-11-24T02:49:48,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:49:48,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:49:48,388 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
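
The CompactionConfiguration(183) entries above print the effective compaction tuning for each column family of hbase:meta, and the FlushLargeStoresPolicy(65) entry shows the per-family flush lower bound being derived as flush size / number of families (128 MB / 4 = 32 MB, i.e. the flushSizeLowerBound=33554432 reported a few entries later). As a minimal sketch of where those figures come from, the snippet below reads the standard HBase keys with their stock defaults; the class name is illustrative, and it assumes hbase-common is on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobs {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // files [minFilesToCompact:3, maxFilesToCompact:10)
            System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));
            System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));
            // ratio 1.200000; off-peak ratio 5.000000
            System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
            System.out.println(conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F));
            // major period 604800000 (7 days), major jitter 0.500000
            System.out.println(conf.getLong("hbase.hregion.majorcompaction", 604800000L));
            System.out.println(conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F));
            // per-family flush lower bound falls back to flushSize / #families when
            // hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, as logged above
            System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", 134217728L) / 4);
        }
    }
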
2024-11-24T02:49:48,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:49:48,396 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:49:48,397 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75125687, jitterRate=0.11945997178554535}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:49:48,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416588362Initializing all the Stores at 1732416588363 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416588363Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416588363Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416588363Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416588363Cleaning up temporary data from old regions at 1732416588387 (+24 ms)Region opened successfully at 1732416588402 (+15 ms) 2024-11-24T02:49:48,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:49:48,403 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:49:48,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:49:48,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:49:48,403 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:49:48,404 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:49:48,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416588402Disabling compacts and flushes for region at 1732416588402Disabling writes for close at 1732416588403 (+1 
ms)Writing region close event to WAL at 1732416588404 (+1 ms)Closed at 1732416588404 2024-11-24T02:49:48,407 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:49:48,407 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:49:48,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:49:48,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:49:48,424 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:49:48,426 DEBUG [RS:2;7c69a60bd8f6:34975 {}] zookeeper.ZKUtil(111): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,426 DEBUG [RS:0;7c69a60bd8f6:46185 {}] zookeeper.ZKUtil(111): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,426 WARN [RS:0;7c69a60bd8f6:46185 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:49:48,426 WARN [RS:2;7c69a60bd8f6:34975 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:49:48,426 INFO [RS:0;7c69a60bd8f6:46185 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:49:48,426 INFO [RS:2;7c69a60bd8f6:34975 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:49:48,426 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,426 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,427 DEBUG [RS:1;7c69a60bd8f6:36867 {}] zookeeper.ZKUtil(111): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,427 WARN [RS:1;7c69a60bd8f6:36867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
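
The ZKUtil(111) entries show each regionserver confirming its own ephemeral znode under /hbase/rs; the NodeChildrenChanged event on /hbase/rs a few entries earlier is the same mechanism seen from the master's watcher. One quick way to inspect those znodes is the plain ZooKeeper client, using the quorum address and baseZNode taken from this run's log lines; the class name is illustrative.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZnodes {
        public static void main(String[] args) throws Exception {
            // Quorum from the log above: 127.0.0.1:60680, baseZNode=/hbase
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60680", 30_000, event -> { });
            // Children are named host,port,startcode, e.g. 7c69a60bd8f6,46185,1732416587362
            List<String> servers = zk.getChildren("/hbase/rs", false);
            servers.forEach(System.out::println);
            zk.close();
        }
    }
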
2024-11-24T02:49:48,427 INFO [RS:1;7c69a60bd8f6:36867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:49:48,427 DEBUG [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,428 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:49:48,430 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,34975,1732416587480] 2024-11-24T02:49:48,430 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,46185,1732416587362] 2024-11-24T02:49:48,430 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,36867,1732416587422] 2024-11-24T02:49:48,453 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:49:48,453 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:49:48,453 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:49:48,473 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:49:48,473 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:49:48,473 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:49:48,479 INFO [RS:2;7c69a60bd8f6:34975 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:49:48,479 INFO [RS:0;7c69a60bd8f6:46185 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:49:48,479 INFO [RS:1;7c69a60bd8f6:36867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:49:48,479 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,479 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,479 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
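
The MemStoreFlusher(131) figures hang together with the stock heap fractions: with the default hbase.regionserver.global.memstore.size of 0.40, a global limit of 880 MB implies a heap of roughly 2.2 GB, and the low-water mark of 836 MB is exactly 0.95 of the limit, the default hbase.regionserver.global.memstore.size.lower.limit. A sketch of that arithmetic, not taken from the test code itself:

    public class MemstoreLimitMath {
        public static void main(String[] args) {
            double globalFraction = 0.40; // hbase.regionserver.global.memstore.size (default)
            double lowerFraction = 0.95;  // hbase.regionserver.global.memstore.size.lower.limit (default)
            long limitMb = 880;           // globalMemStoreLimit from the log
            System.out.printf("implied heap   ~ %.0f MB%n", limitMb / globalFraction);
            System.out.printf("low-water mark = %.0f MB%n", limitMb * lowerFraction); // 836 MB, as logged
        }
    }
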
2024-11-24T02:49:48,480 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:49:48,480 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:49:48,480 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:49:48,485 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:49:48,485 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:49:48,485 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:49:48,487 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,487 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,487 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG 
[RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:49:48,487 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,487 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:0;7c69a60bd8f6:46185 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:49:48,488 DEBUG [RS:2;7c69a60bd8f6:34975 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,488 DEBUG [RS:1;7c69a60bd8f6:36867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
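
The executor.ExecutorService(95) entries enumerate the fixed-size worker pools each regionserver starts; for most of them corePoolSize equals maxPoolSize, so they behave like plain fixed thread pools (RS_LOG_REPLAY_OPS gets 2 threads, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS get 3 each). A JDK-only sketch of the same shape for the 3-thread flush pool; this is an analogy, not HBase's own ExecutorService class, and the names are illustrative.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FlushPoolSketch {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    3, 3,                         // corePoolSize=3, maxPoolSize=3
                    60, TimeUnit.SECONDS,         // keep-alive for idle threads
                    new LinkedBlockingQueue<>()); // unbounded queue, so the pool never grows past core size
            pool.submit(() -> System.out.println("flush operation"));
            pool.shutdown();
        }
    }
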
2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,495 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,46185,1732416587362-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:49:48,495 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,36867,1732416587422-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,496 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34975,1732416587480-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:49:48,512 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:49:48,515 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,46185,1732416587362-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,515 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,515 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.Replication(171): 7c69a60bd8f6,46185,1732416587362 started 2024-11-24T02:49:48,518 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:49:48,519 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34975,1732416587480-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
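
The ChoreService(168) entries register periodic housekeeping tasks with their periods: CompactionChecker and MemstoreFlusherChore every second, ExecutorStatusChore every minute, nonceCleaner every six minutes, MobFileCleanerChore daily, and so on. Functionally each chore is a fixed-rate scheduled task; a plain-JDK sketch of the 1000 ms CompactionChecker cadence, purely illustrative:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
            chores.scheduleAtFixedRate(
                    () -> System.out.println("check stores for compaction work"),
                    0, 1000, TimeUnit.MILLISECONDS); // period=1000, unit=MILLISECONDS, as logged
            Thread.sleep(3_000); // let it fire a few times
            chores.shutdown();
        }
    }
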
2024-11-24T02:49:48,519 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,519 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.Replication(171): 7c69a60bd8f6,34975,1732416587480 started 2024-11-24T02:49:48,526 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:49:48,526 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,36867,1732416587422-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,526 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,526 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.Replication(171): 7c69a60bd8f6,36867,1732416587422 started 2024-11-24T02:49:48,533 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,533 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,46185,1732416587362, RpcServer on 7c69a60bd8f6/172.17.0.2:46185, sessionid=0x1016abf41270001 2024-11-24T02:49:48,534 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:49:48,534 DEBUG [RS:0;7c69a60bd8f6:46185 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,534 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,46185,1732416587362' 2024-11-24T02:49:48,534 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:49:48,535 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:49:48,536 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:49:48,536 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:49:48,536 DEBUG [RS:0;7c69a60bd8f6:46185 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,536 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,46185,1732416587362' 2024-11-24T02:49:48,536 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:49:48,537 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:49:48,538 DEBUG [RS:0;7c69a60bd8f6:46185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:49:48,538 INFO [RS:0;7c69a60bd8f6:46185 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:49:48,538 INFO [RS:0;7c69a60bd8f6:46185 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-24T02:49:48,539 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:48,539 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,34975,1732416587480, RpcServer on 7c69a60bd8f6/172.17.0.2:34975, sessionid=0x1016abf41270003 2024-11-24T02:49:48,540 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:49:48,540 DEBUG [RS:2;7c69a60bd8f6:34975 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,540 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34975,1732416587480' 2024-11-24T02:49:48,540 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:49:48,540 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:49:48,541 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:49:48,541 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:49:48,541 DEBUG [RS:2;7c69a60bd8f6:34975 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:48,541 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34975,1732416587480' 2024-11-24T02:49:48,541 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:49:48,542 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:49:48,542 DEBUG [RS:2;7c69a60bd8f6:34975 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:49:48,542 INFO [RS:2;7c69a60bd8f6:34975 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:49:48,542 INFO [RS:2;7c69a60bd8f6:34975 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T02:49:48,545 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:49:48,545 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,36867,1732416587422, RpcServer on 7c69a60bd8f6/172.17.0.2:36867, sessionid=0x1016abf41270002 2024-11-24T02:49:48,546 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:49:48,546 DEBUG [RS:1;7c69a60bd8f6:36867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,546 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,36867,1732416587422' 2024-11-24T02:49:48,546 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,36867,1732416587422' 2024-11-24T02:49:48,547 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:49:48,548 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:49:48,548 DEBUG [RS:1;7c69a60bd8f6:36867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:49:48,548 INFO [RS:1;7c69a60bd8f6:36867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:49:48,549 INFO [RS:1;7c69a60bd8f6:36867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T02:49:48,579 WARN [7c69a60bd8f6:34451 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
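
At this point all three regionservers are serving, and the earlier WARN ("No servers available; cannot place 1 unassigned regions") simply shows the assignment manager briefly racing ahead of registration; the assignQueue is reprocessed once servers report in, as the next entries confirm. Once the cluster is up, the same membership can be read back through the client API. A sketch assuming a recent HBase client on the classpath (Admin.getRegionServers is a newer API) and the quorum from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListServers {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "60680");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                for (ServerName sn : admin.getRegionServers()) {
                    System.out.println(sn); // e.g. 7c69a60bd8f6,46185,1732416587362
                }
            }
        }
    }
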
2024-11-24T02:49:48,647 INFO [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C46185%2C1732416587362, suffix=, logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,46185,1732416587362, archiveDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs, maxLogs=32 2024-11-24T02:49:48,647 INFO [RS:2;7c69a60bd8f6:34975 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34975%2C1732416587480, suffix=, logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,34975,1732416587480, archiveDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs, maxLogs=32 2024-11-24T02:49:48,651 INFO [RS:0;7c69a60bd8f6:46185 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C46185%2C1732416587362.1732416588651 2024-11-24T02:49:48,651 INFO [RS:2;7c69a60bd8f6:34975 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34975%2C1732416587480.1732416588651 2024-11-24T02:49:48,652 INFO [RS:1;7c69a60bd8f6:36867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C36867%2C1732416587422, suffix=, logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,36867,1732416587422, archiveDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs, maxLogs=32 2024-11-24T02:49:48,655 INFO [RS:1;7c69a60bd8f6:36867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C36867%2C1732416587422.1732416588654 2024-11-24T02:49:48,674 INFO [RS:2;7c69a60bd8f6:34975 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,34975,1732416587480/7c69a60bd8f6%2C34975%2C1732416587480.1732416588651 2024-11-24T02:49:48,675 INFO [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,46185,1732416587362/7c69a60bd8f6%2C46185%2C1732416587362.1732416588651 2024-11-24T02:49:48,680 DEBUG [RS:2;7c69a60bd8f6:34975 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35071:35071),(127.0.0.1/127.0.0.1:41015:41015),(127.0.0.1/127.0.0.1:41109:41109)] 2024-11-24T02:49:48,681 DEBUG [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35071:35071),(127.0.0.1/127.0.0.1:41015:41015),(127.0.0.1/127.0.0.1:41109:41109)] 2024-11-24T02:49:48,681 INFO [RS:1;7c69a60bd8f6:36867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,36867,1732416587422/7c69a60bd8f6%2C36867%2C1732416587422.1732416588654 2024-11-24T02:49:48,682 DEBUG [RS:1;7c69a60bd8f6:36867 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41015:41015),(127.0.0.1/127.0.0.1:41109:41109),(127.0.0.1/127.0.0.1:35071:35071)] 2024-11-24T02:49:48,832 DEBUG [7c69a60bd8f6:34451 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-24T02:49:48,858 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(204): Hosts are {7c69a60bd8f6=0} racks are {/default-rack=0} 2024-11-24T02:49:48,864 DEBUG [7c69a60bd8f6:34451 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-24T02:49:48,864 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-24T02:49:48,865 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-24T02:49:48,865 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-24T02:49:48,865 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-24T02:49:48,865 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-24T02:49:48,865 INFO [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-24T02:49:48,865 INFO [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-24T02:49:48,865 INFO [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-24T02:49:48,865 DEBUG [7c69a60bd8f6:34451 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-24T02:49:48,873 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:48,879 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,46185,1732416587362, state=OPENING 2024-11-24T02:49:48,931 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:49:48,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:48,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:48,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:48,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:48,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:48,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:48,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:48,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:48,944 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-11-24T02:49:48,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,46185,1732416587362}] 2024-11-24T02:49:49,127 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:49:49,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50909, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:49:49,143 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:49:49,144 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:49:49,147 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C46185%2C1732416587362.meta, suffix=.meta, logDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,46185,1732416587362, archiveDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs, maxLogs=32 2024-11-24T02:49:49,149 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C46185%2C1732416587362.meta.1732416589149.meta 2024-11-24T02:49:49,159 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/WALs/7c69a60bd8f6,46185,1732416587362/7c69a60bd8f6%2C46185%2C1732416587362.meta.1732416589149.meta 2024-11-24T02:49:49,160 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35071:35071),(127.0.0.1/127.0.0.1:41109:41109),(127.0.0.1/127.0.0.1:41015:41015)] 2024-11-24T02:49:49,161 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:49:49,163 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:49:49,165 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:49:49,169 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
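
The AbstractFSWAL(613) entries report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for each WAL, including the dedicated meta WAL with the .meta suffix. Those numbers are consistent: the roll size is the WAL block size scaled by the roll multiplier (0.5 by default), and 32 is the stock hbase.regionserver.maxlogs. A sketch of the arithmetic with the usual key names; treat the exact key set as an assumption about this build.

    public class WalRollMath {
        public static void main(String[] args) {
            long blocksizeBytes = 256L * 1024 * 1024; // hbase.regionserver.hlog.blocksize, as logged
            double multiplier = 0.5;                  // hbase.regionserver.logroll.multiplier (default)
            int maxLogs = 32;                         // hbase.regionserver.maxlogs (default)
            long rollsizeBytes = (long) (blocksizeBytes * multiplier);
            System.out.println("rollsize = " + rollsizeBytes + " bytes (128 MB), maxLogs = " + maxLogs);
        }
    }
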
2024-11-24T02:49:49,173 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:49:49,173 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:49:49,174 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:49:49,174 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:49:49,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:49:49,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:49:49,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:49,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:49:49,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:49:49,180 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:49,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:49:49,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:49:49,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:49:49,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:49:49,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:49:49,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
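
The store-opener entries repeat for the four column families hbase:meta carries (info, ns, rep_barrier, table), this time on the regionserver actually opening the region under the OpenRegionProcedure, rather than the master-side PEWorker pass earlier. Once meta is online, its rows can be scanned like any other table; a client-side sketch under the same connection assumptions as the snippets above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class ScanMeta {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "60680");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(new Scan())) {
                for (Result row : scanner) {
                    System.out.println(row); // one row per region, keyed table,startkey,regionId
                }
            }
        }
    }
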
2024-11-24T02:49:49,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:49:49,186 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740 2024-11-24T02:49:49,188 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740 2024-11-24T02:49:49,190 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:49:49,190 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:49:49,191 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:49:49,193 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:49:49,194 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63153698, jitterRate=-0.05893656611442566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:49:49,194 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:49:49,195 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416589174Writing region info on filesystem at 1732416589174Initializing all the Stores at 1732416589176 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416589176Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416589177 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416589177Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416589177Cleaning up temporary data from old regions at 1732416589190 (+13 ms)Running coprocessor post-open hooks at 1732416589194 (+4 ms)Region opened successfully at 1732416589195 (+1 ms) 2024-11-24T02:49:49,201 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416589118 2024-11-24T02:49:49,212 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:49:49,212 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:49:49,214 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:49,217 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,46185,1732416587362, state=OPEN 2024-11-24T02:49:49,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:49:49,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:49:49,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:49:49,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:49:49,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:49,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:49,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:49,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:49:49,231 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:49,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:49:49,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,46185,1732416587362 in 285 msec 2024-11-24T02:49:49,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:49:49,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 824 msec 2024-11-24T02:49:49,249 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:49:49,249 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:49:49,266 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:49:49,267 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,46185,1732416587362, seqNum=-1] 2024-11-24T02:49:49,286 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:49:49,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58119, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:49:49,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0700 sec 2024-11-24T02:49:49,309 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416589308, completionTime=-1 2024-11-24T02:49:49,311 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-24T02:49:49,311 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-24T02:49:49,342 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-24T02:49:49,342 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416649342 2024-11-24T02:49:49,342 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416709342 2024-11-24T02:49:49,342 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 31 msec 2024-11-24T02:49:49,345 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-24T02:49:49,351 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,351 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,352 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,353 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:34451, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,354 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,354 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,362 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:49:49,380 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.827sec 2024-11-24T02:49:49,382 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:49:49,383 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:49:49,384 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:49:49,385 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T02:49:49,385 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:49:49,385 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:49:49,386 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:49:49,390 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:49:49,391 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:49:49,391 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34451,1732416587166-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:49:49,421 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e95def0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:49:49,423 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T02:49:49,423 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T02:49:49,426 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,34451,-1 for getting cluster id 2024-11-24T02:49:49,429 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:49:49,436 DEBUG [HMaster-EventLoopGroup-6-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3225b81-0a8f-4423-b84f-eade4e04101e' 2024-11-24T02:49:49,438 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:49:49,438 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3225b81-0a8f-4423-b84f-eade4e04101e" 2024-11-24T02:49:49,439 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7779b234, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:49:49,439 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,34451,-1] 2024-11-24T02:49:49,441 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:49:49,443 DEBUG [RPCClient-NioEventLoopGroup-10-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:49,444 INFO [HMaster-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
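For orientation: everything from the top of this log through the "Minicluster is up" record just below is driven by a single harness call in the test's setup. A minimal sketch, assuming the org.apache.hadoop.hbase.HBaseTestingUtil API that appears in this log's stack traces (the exact field name and arguments are assumptions, not the test's verbatim source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class StartupSketch {
      // Shared harness instance; also referenced by the tearDown sketch near
      // the end of this log. The name UTIL is illustrative.
      static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      public static void setUp() throws Exception {
        // One master plus three region servers, matching the logged
        // "Finished waiting on RegionServer count=3".
        UTIL.startMiniCluster(3);
      }
    }
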
2024-11-24T02:49:49,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@378c22ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:49:49,447 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:49:49,454 DEBUG [RPCClient-NioEventLoopGroup-10-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,46185,1732416587362, seqNum=-1] 2024-11-24T02:49:49,455 DEBUG [RPCClient-NioEventLoopGroup-10-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:49:49,457 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:49:49,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:49,481 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:49:49,486 DEBUG [RPCClient-NioEventLoopGroup-10-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:49,488 DEBUG [RPCClient-NioEventLoopGroup-10-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4dbd965c 2024-11-24T02:49:49,489 DEBUG [RPCClient-NioEventLoopGroup-10-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:49:49,492 INFO [HMaster-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:49:49,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:49:49,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-24T02:49:49,508 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:49:49,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-24T02:49:49,510 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,513 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:49:49,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:49:49,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741837_1013 (size=392) 2024-11-24T02:49:49,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741837_1013 (size=392) 2024-11-24T02:49:49,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741837_1013 (size=392) 2024-11-24T02:49:49,534 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 99b4a5bbab74ea8e8dbaaededc70e278, NAME => 'TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d 2024-11-24T02:49:49,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741838_1014 (size=51) 2024-11-24T02:49:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741838_1014 (size=51) 2024-11-24T02:49:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741838_1014 (size=51) 2024-11-24T02:49:49,550 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:49:49,550 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 99b4a5bbab74ea8e8dbaaededc70e278, disabling compactions & flushes 2024-11-24T02:49:49,551 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:49,551 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:49,551 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
after waiting 0 ms 2024-11-24T02:49:49,551 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:49,551 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:49,551 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 99b4a5bbab74ea8e8dbaaededc70e278: Waiting for close lock at 1732416589550Disabling compacts and flushes for region at 1732416589550Disabling writes for close at 1732416589551 (+1 ms)Writing region close event to WAL at 1732416589551Closed at 1732416589551 2024-11-24T02:49:49,553 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:49:49,559 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732416589553"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416589553"}]},"ts":"1732416589553"} 2024-11-24T02:49:49,563 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T02:49:49,565 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:49:49,568 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416589565"}]},"ts":"1732416589565"} 2024-11-24T02:49:49,572 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-24T02:49:49,572 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {7c69a60bd8f6=0} racks are {/default-rack=0} 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-24T02:49:49,574 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-24T02:49:49,574 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-24T02:49:49,574 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-24T02:49:49,574 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-24T02:49:49,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=99b4a5bbab74ea8e8dbaaededc70e278, ASSIGN}] 2024-11-24T02:49:49,578 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=99b4a5bbab74ea8e8dbaaededc70e278, ASSIGN 2024-11-24T02:49:49,580 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=99b4a5bbab74ea8e8dbaaededc70e278, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,34975,1732416587480; forceNewPlan=false, retain=false 2024-11-24T02:49:49,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:49:49,734 INFO [7c69a60bd8f6:34451 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-24T02:49:49,734 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=99b4a5bbab74ea8e8dbaaededc70e278, regionState=OPENING, regionLocation=7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:49,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=99b4a5bbab74ea8e8dbaaededc70e278, ASSIGN because future has completed 2024-11-24T02:49:49,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b4a5bbab74ea8e8dbaaededc70e278, server=7c69a60bd8f6,34975,1732416587480}] 2024-11-24T02:49:49,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:49:49,896 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:49:49,898 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52411, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:49:49,904 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
2024-11-24T02:49:49,904 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 99b4a5bbab74ea8e8dbaaededc70e278, NAME => 'TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:49:49,904 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,904 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:49:49,905 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,905 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,907 INFO [StoreOpener-99b4a5bbab74ea8e8dbaaededc70e278-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,909 INFO [StoreOpener-99b4a5bbab74ea8e8dbaaededc70e278-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99b4a5bbab74ea8e8dbaaededc70e278 columnFamilyName cf 2024-11-24T02:49:49,909 DEBUG [StoreOpener-99b4a5bbab74ea8e8dbaaededc70e278-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:49:49,910 INFO [StoreOpener-99b4a5bbab74ea8e8dbaaededc70e278-1 {}] regionserver.HStore(327): Store=99b4a5bbab74ea8e8dbaaededc70e278/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:49:49,910 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,911 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,911 DEBUG 
[RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,912 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,912 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,914 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,918 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:49:49,918 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 99b4a5bbab74ea8e8dbaaededc70e278; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62855049, jitterRate=-0.06338678300380707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:49:49,919 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:49,919 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 99b4a5bbab74ea8e8dbaaededc70e278: Running coprocessor pre-open hook at 1732416589905Writing region info on filesystem at 1732416589905Initializing all the Stores at 1732416589906 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416589907 (+1 ms)Cleaning up temporary data from old regions at 1732416589912 (+5 ms)Running coprocessor post-open hooks at 1732416589919 (+7 ms)Region opened successfully at 1732416589919 2024-11-24T02:49:49,921 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278., pid=6, masterSystemTime=1732416589896 2024-11-24T02:49:49,926 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:49,926 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
2024-11-24T02:49:49,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=99b4a5bbab74ea8e8dbaaededc70e278, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:49,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b4a5bbab74ea8e8dbaaededc70e278, server=7c69a60bd8f6,34975,1732416587480 because future has completed 2024-11-24T02:49:49,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:49:49,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 99b4a5bbab74ea8e8dbaaededc70e278, server=7c69a60bd8f6,34975,1732416587480 in 192 msec 2024-11-24T02:49:49,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:49:49,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=99b4a5bbab74ea8e8dbaaededc70e278, ASSIGN in 363 msec 2024-11-24T02:49:49,945 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:49:49,945 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416589945"}]},"ts":"1732416589945"} 2024-11-24T02:49:49,948 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-24T02:49:49,950 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:49:49,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 450 msec 2024-11-24T02:49:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:49:50,145 INFO [RPCClient-NioEventLoopGroup-10-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-24T02:49:50,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-24T02:49:50,147 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:49:50,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-24T02:49:50,154 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:49:50,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
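Client-side, the create request logged at 02:49:49,497 and the assignment wait just above correspond roughly to the sketch below. The builder calls and the util handle are assumptions; the log records only the resulting table descriptor and the harness's wait messages.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class CreateTableSketch {
      static void createAndWait(HBaseTestingUtil util) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        Admin admin = util.getAdmin();
        // A single 'cf' family; the VERSIONS => '1', BLOCKSIZE => 64KB etc.
        // in the logged descriptor are defaults the master fills in, so only
        // the family name needs to be specified here.
        admin.createTable(TableDescriptorBuilder.newBuilder(tn)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build());
        // Drives the "Waiting until all regions of table TestHBaseWalOnEC
        // get assigned" records logged just above.
        util.waitUntilAllRegionsAssigned(tn);
      }
    }
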
2024-11-24T02:49:50,163 DEBUG [RPCClient-NioEventLoopGroup-10-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278., hostname=7c69a60bd8f6,34975,1732416587480, seqNum=2] 2024-11-24T02:49:50,164 DEBUG [RPCClient-NioEventLoopGroup-10-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:49:50,167 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:49:50,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-24T02:49:50,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-24T02:49:50,179 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-24T02:49:50,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T02:49:50,181 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T02:49:50,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T02:49:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T02:49:50,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34975 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-24T02:49:50,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
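The location fetch for row='row' at 02:49:50,163 happens implicitly inside the client before its first write to the table. An explicit equivalent, as a sketch with an assumed conn handle:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    class LocateSketch {
      static HRegionLocation locate(Connection conn) throws Exception {
        try (RegionLocator locator =
            conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
          // Resolves to [region=TestHBaseWalOnEC,..., hostname=...,
          // seqNum=2], as in the AsyncNonMetaRegionLocator record above.
          return locator.getRegionLocation(Bytes.toBytes("row"));
        }
      }
    }
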
2024-11-24T02:49:50,357 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 99b4a5bbab74ea8e8dbaaededc70e278 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-24T02:49:50,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/.tmp/cf/c0760d98ac784c9ea718d77c24bad3d9 is 36, key is row/cf:cq/1732416590167/Put/seqid=0 2024-11-24T02:49:50,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741839_1015 (size=4787) 2024-11-24T02:49:50,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741839_1015 (size=4787) 2024-11-24T02:49:50,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741839_1015 (size=4787) 2024-11-24T02:49:50,415 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/.tmp/cf/c0760d98ac784c9ea718d77c24bad3d9 2024-11-24T02:49:50,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/.tmp/cf/c0760d98ac784c9ea718d77c24bad3d9 as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/cf/c0760d98ac784c9ea718d77c24bad3d9 2024-11-24T02:49:50,464 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/cf/c0760d98ac784c9ea718d77c24bad3d9, entries=1, sequenceid=5, filesize=4.7 K 2024-11-24T02:49:50,471 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 99b4a5bbab74ea8e8dbaaededc70e278 in 112ms, sequenceid=5, compaction requested=false 2024-11-24T02:49:50,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-24T02:49:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 99b4a5bbab74ea8e8dbaaededc70e278: 2024-11-24T02:49:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
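The flush just completed persisted a single ~32 B cell whose key, row/cf:cq, is visible in the HFileWriterImpl record above. Roughly the client calls involved, as a sketch (the cell value and the conn/admin handles are assumptions; the log records only sizes):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class PutAndFlushSketch {
      static void putAndFlush(Connection conn, Admin admin) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn)) {
          // Writes the row/cf:cq cell that the flush above moved from the
          // memstore into an HFile; the value bytes are illustrative.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"),
                  Bytes.toBytes("value")));
        }
        // Triggers the master-side FlushTableProcedure (pid=7) and its
        // per-region FlushRegionProcedure (pid=8) seen above.
        admin.flush(tn);
      }
    }

From the client's point of view the flush call returns once the procedure finishes, which is what the "Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed" record below reports.
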
2024-11-24T02:49:50,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T02:49:50,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T02:49:50,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T02:49:50,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 296 msec 2024-11-24T02:49:50,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 311 msec 2024-11-24T02:49:50,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T02:49:50,502 INFO [RPCClient-NioEventLoopGroup-10-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-24T02:49:50,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:49:50,515 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:49:50,515 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:50,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,516 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T02:49:50,516 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:49:50,516 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1462308807, stopped=false 2024-11-24T02:49:50,516 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,34451,1732416587166 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, 
quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:50,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:50,625 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:49:50,625 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:49:50,625 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:50,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,46185,1732416587362' ***** 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,36867,1732416587422' ***** 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:49:50,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:50,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:50,626 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:49:50,626 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:49:50,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,34975,1732416587480' ***** 2024-11-24T02:49:50,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:49:50,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:49:50,627 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:49:50,627 INFO [RS:0;7c69a60bd8f6:46185 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:49:50,627 INFO [RS:1;7c69a60bd8f6:36867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-24T02:49:50,627 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:49:50,627 INFO [RS:1;7c69a60bd8f6:36867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:49:50,627 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:50,627 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:49:50,627 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:49:50,627 INFO [RS:0;7c69a60bd8f6:46185 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:49:50,627 INFO [RS:1;7c69a60bd8f6:36867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7c69a60bd8f6:36867. 2024-11-24T02:49:50,627 INFO [RS:2;7c69a60bd8f6:34975 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:49:50,627 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:50,627 INFO [RS:2;7c69a60bd8f6:34975 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:49:50,627 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:49:50,627 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:49:50,627 DEBUG [RS:1;7c69a60bd8f6:36867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:50,628 DEBUG [RS:1;7c69a60bd8f6:36867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:46185. 
2024-11-24T02:49:50,628 DEBUG [RS:0;7c69a60bd8f6:46185 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:50,628 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(3091): Received CLOSE for 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:50,628 DEBUG [RS:0;7c69a60bd8f6:46185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,628 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,36867,1732416587422; all regions closed. 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:49:50,628 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:50,628 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:49:50,628 INFO [RS:2;7c69a60bd8f6:34975 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;7c69a60bd8f6:34975. 
2024-11-24T02:49:50,628 DEBUG [RS:2;7c69a60bd8f6:34975 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:49:50,628 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T02:49:50,629 DEBUG [RS:2;7c69a60bd8f6:34975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,629 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:49:50,629 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T02:49:50,629 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1325): Online Regions={99b4a5bbab74ea8e8dbaaededc70e278=TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278.} 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 99b4a5bbab74ea8e8dbaaededc70e278, disabling compactions & flushes 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:49:50,629 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:49:50,629 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
2024-11-24T02:49:50,629 DEBUG [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T02:49:50,629 DEBUG [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1351): Waiting on 99b4a5bbab74ea8e8dbaaededc70e278 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. after waiting 0 ms 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:49:50,629 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:50,629 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-24T02:49:50,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741835_1011 (size=93) 2024-11-24T02:49:50,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741835_1011 (size=93) 2024-11-24T02:49:50,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741835_1011 (size=93) 2024-11-24T02:49:50,642 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/default/TestHBaseWalOnEC/99b4a5bbab74ea8e8dbaaededc70e278/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-24T02:49:50,644 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 
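[Annotation] Before closing, the meta region flushes its memstore (1.34 KB across 4 column families above). Outside of shutdown, the same flush can be requested through the client Admin API; a hedged sketch, with connection setup left to defaults (everything here other than Admin.flush itself is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces memstore contents out to HFiles, analogous to the
      // close-time flush logged above.
      admin.flush(TableName.valueOf("TestHBaseWalOnEC"));
    }
  }
}
```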
2024-11-24T02:49:50,645 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 99b4a5bbab74ea8e8dbaaededc70e278: Waiting for close lock at 1732416590628Running coprocessor pre-close hooks at 1732416590629 (+1 ms)Disabling compacts and flushes for region at 1732416590629Disabling writes for close at 1732416590629Writing region close event to WAL at 1732416590635 (+6 ms)Running coprocessor post-close hooks at 1732416590643 (+8 ms)Closed at 1732416590644 (+1 ms) 2024-11-24T02:49:50,645 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278. 2024-11-24T02:49:50,647 DEBUG [RS:1;7c69a60bd8f6:36867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs 2024-11-24T02:49:50,647 INFO [RS:1;7c69a60bd8f6:36867 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C36867%2C1732416587422:(num 1732416588654) 2024-11-24T02:49:50,647 DEBUG [RS:1;7c69a60bd8f6:36867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,647 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,647 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:49:50,648 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:49:50,648 INFO [RS:1;7c69a60bd8f6:36867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36867 2024-11-24T02:49:50,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:49:50,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,36867,1732416587422 2024-11-24T02:49:50,657 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:49:50,659 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/info/d2165e655fba40698375379a53bb5ea0 is 153, key is TestHBaseWalOnEC,,1732416589493.99b4a5bbab74ea8e8dbaaededc70e278./info:regioninfo/1732416589927/Put/seqid=0 2024-11-24T02:49:50,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741840_1016 (size=6637) 2024-11-24T02:49:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741840_1016 (size=6637) 2024-11-24T02:49:50,668 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,36867,1732416587422] 2024-11-24T02:49:50,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741840_1016 (size=6637) 2024-11-24T02:49:50,670 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/info/d2165e655fba40698375379a53bb5ea0 2024-11-24T02:49:50,678 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,36867,1732416587422 already deleted, retry=false 2024-11-24T02:49:50,678 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,36867,1732416587422 expired; onlineServers=2 2024-11-24T02:49:50,697 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/ns/a39222bbd88e4f3695c6999d8f9207fd is 43, key is default/ns:d/1732416589293/Put/seqid=0 2024-11-24T02:49:50,702 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,702 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,704 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35293 is added to blk_1073741841_1017 (size=5153) 2024-11-24T02:49:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741841_1017 (size=5153) 2024-11-24T02:49:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741841_1017 (size=5153) 2024-11-24T02:49:50,707 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/ns/a39222bbd88e4f3695c6999d8f9207fd 2024-11-24T02:49:50,740 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/table/c3cdc2020d7e4c97b4df40c3fede7d0d is 52, key is TestHBaseWalOnEC/table:state/1732416589945/Put/seqid=0 2024-11-24T02:49:50,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741842_1018 (size=5249) 2024-11-24T02:49:50,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741842_1018 (size=5249) 2024-11-24T02:49:50,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741842_1018 (size=5249) 2024-11-24T02:49:50,751 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/table/c3cdc2020d7e4c97b4df40c3fede7d0d 2024-11-24T02:49:50,761 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/info/d2165e655fba40698375379a53bb5ea0 as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/info/d2165e655fba40698375379a53bb5ea0 2024-11-24T02:49:50,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,768 INFO [RS:1;7c69a60bd8f6:36867 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:49:50,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36867-0x1016abf41270002, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,768 INFO [RS:1;7c69a60bd8f6:36867 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,36867,1732416587422; zookeeper connection closed. 
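[Annotation] The ZKWatcher lines above log each watch notification: NodeChildrenChanged on /hbase/rs as servers deregister, NodeDeleted for the ephemeral server znodes, and session-Closed events once connections shut down. A stand-alone sketch of the same mechanism with the stock ZooKeeper client, printing the same three fields (the connect string is reused from the log; the rest is illustrative):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkEventSketch {
  public static void main(String[] args) throws Exception {
    // Prints the same fields ZKWatcher logs for every notification.
    Watcher watcher = (WatchedEvent event) ->
        System.out.printf("Received ZooKeeper Event, type=%s, state=%s, path=%s%n",
            event.getType(), event.getState(), event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60680", 30_000, watcher);
    // Watches are one-shot and must be re-armed after each event -- which is
    // why the log above keeps re-setting watchers on /hbase znodes.
    zk.getChildren("/hbase", true); // arm a NodeChildrenChanged watch
    Thread.sleep(5_000);            // give events time to arrive
    zk.close();
  }
}
```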
2024-11-24T02:49:50,769 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@186438ed {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@186438ed 2024-11-24T02:49:50,771 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/info/d2165e655fba40698375379a53bb5ea0, entries=10, sequenceid=11, filesize=6.5 K 2024-11-24T02:49:50,774 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/ns/a39222bbd88e4f3695c6999d8f9207fd as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/ns/a39222bbd88e4f3695c6999d8f9207fd 2024-11-24T02:49:50,785 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/ns/a39222bbd88e4f3695c6999d8f9207fd, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T02:49:50,787 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/.tmp/table/c3cdc2020d7e4c97b4df40c3fede7d0d as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/table/c3cdc2020d7e4c97b4df40c3fede7d0d 2024-11-24T02:49:50,798 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/table/c3cdc2020d7e4c97b4df40c3fede7d0d, entries=2, sequenceid=11, filesize=5.1 K 2024-11-24T02:49:50,800 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 171ms, sequenceid=11, compaction requested=false 2024-11-24T02:49:50,800 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:49:50,812 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T02:49:50,813 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:49:50,813 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:49:50,813 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 
1732416590628Running coprocessor pre-close hooks at 1732416590629 (+1 ms)Disabling compacts and flushes for region at 1732416590629Disabling writes for close at 1732416590629Obtaining lock to block concurrent updates at 1732416590629Preparing flush snapshotting stores in 1588230740 at 1732416590629Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732416590630 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732416590631 (+1 ms)Flushing 1588230740/info: creating writer at 1732416590631Flushing 1588230740/info: appending metadata at 1732416590654 (+23 ms)Flushing 1588230740/info: closing flushed file at 1732416590654Flushing 1588230740/ns: creating writer at 1732416590681 (+27 ms)Flushing 1588230740/ns: appending metadata at 1732416590695 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732416590696 (+1 ms)Flushing 1588230740/table: creating writer at 1732416590717 (+21 ms)Flushing 1588230740/table: appending metadata at 1732416590738 (+21 ms)Flushing 1588230740/table: closing flushed file at 1732416590738Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28512d7b: reopening flushed file at 1732416590760 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c4a573: reopening flushed file at 1732416590772 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@546916c2: reopening flushed file at 1732416590785 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 171ms, sequenceid=11, compaction requested=false at 1732416590800 (+15 ms)Writing region close event to WAL at 1732416590807 (+7 ms)Running coprocessor post-close hooks at 1732416590813 (+6 ms)Closed at 1732416590813 2024-11-24T02:49:50,813 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:49:50,829 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,46185,1732416587362; all regions closed. 2024-11-24T02:49:50,829 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,34975,1732416587480; all regions closed. 
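[Annotation] The region close journal above concatenates its steps as "<step> at <epochMillis>" with an optional "(+<delta> ms)" elapsed marker. A small hypothetical parser for pulling per-step timings out of such a line (the format is inferred from the log; this helper is not part of HBase, and steps containing ':' such as "Flushing 1588230740/info: creating writer" would need a looser pattern):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParser {
  // "<step> at <13-digit epochMillis>" optionally followed by "(+<delta> ms)".
  private static final Pattern STEP =
      Pattern.compile("([A-Z][^:]*?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1732416590628"
        + "Running coprocessor pre-close hooks at 1732416590629 (+1 ms)"
        + "Disabling writes for close at 1732416590629";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      System.out.printf("%-40s t=%s +%sms%n",
          m.group(1), m.group(2), m.group(3) == null ? "0" : m.group(3));
    }
  }
}
```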
2024-11-24T02:49:50,830 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,830 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,831 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,831 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741833_1009 (size=1298) 2024-11-24T02:49:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741836_1012 (size=2751) 2024-11-24T02:49:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741833_1009 (size=1298) 2024-11-24T02:49:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741833_1009 (size=1298) 2024-11-24T02:49:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741836_1012 (size=2751) 2024-11-24T02:49:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741836_1012 (size=2751) 2024-11-24T02:49:50,838 DEBUG [RS:2;7c69a60bd8f6:34975 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs 2024-11-24T02:49:50,838 DEBUG [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs 2024-11-24T02:49:50,838 INFO [RS:2;7c69a60bd8f6:34975 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34975%2C1732416587480:(num 1732416588651) 2024-11-24T02:49:50,839 INFO [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C46185%2C1732416587362.meta:.meta(num 1732416589149) 2024-11-24T02:49:50,839 DEBUG [RS:2;7c69a60bd8f6:34975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:49:50,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
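[Annotation] Each file close above is acknowledged by three addStoredBlock reports, one per datanode in the mini DFS cluster. The placement of a file's blocks can be inspected with the plain Hadoop FileSystem API; a sketch, reusing the NameNode address and oldWALs directory from the log (the check itself is illustrative, not the test's code):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs =
        FileSystem.get(URI.create("hdfs://localhost:35433"), new Configuration());
    Path oldWals =
        new Path("/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs");
    for (FileStatus st : fs.listStatus(oldWals)) {
      for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
        // One line per block with its replica endpoints, mirroring the three
        // datanodes seen in the addStoredBlock reports above.
        System.out.printf("%s -> %s%n",
            st.getPath().getName(), String.join(", ", loc.getNames()));
      }
    }
  }
}
```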
2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:49:50,839 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:49:50,839 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:49:50,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,840 INFO [RS:2;7c69a60bd8f6:34975 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34975 2024-11-24T02:49:50,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:50,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741834_1010 (size=93) 2024-11-24T02:49:50,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741834_1010 (size=93) 2024-11-24T02:49:50,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741834_1010 (size=93) 2024-11-24T02:49:50,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:49:50,847 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:49:50,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,34975,1732416587480 2024-11-24T02:49:50,851 DEBUG [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/oldWALs 2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C46185%2C1732416587362:(num 1732416588651) 2024-11-24T02:49:50,852 DEBUG [RS:0;7c69a60bd8f6:46185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:49:50,852 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T02:49:50,852 INFO [RS:0;7c69a60bd8f6:46185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46185 2024-11-24T02:49:50,857 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,34975,1732416587480] 2024-11-24T02:49:50,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,46185,1732416587362 2024-11-24T02:49:50,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:49:50,868 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:49:50,878 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,34975,1732416587480 already deleted, retry=false 2024-11-24T02:49:50,878 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,34975,1732416587480 expired; onlineServers=1 2024-11-24T02:49:50,889 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,46185,1732416587362] 2024-11-24T02:49:50,899 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,46185,1732416587362 already deleted, retry=false 2024-11-24T02:49:50,899 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,46185,1732416587362 expired; onlineServers=0 2024-11-24T02:49:50,899 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,34451,1732416587166' ***** 2024-11-24T02:49:50,899 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:49:50,899 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:49:50,900 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:49:50,900 DEBUG [M:0;7c69a60bd8f6:34451 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:49:50,900 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
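[Annotation] The master learns of each region server's exit through the deletion of that server's ephemeral znode under /hbase/rs (the "ephemeral node deleted, processing expiration" lines above). A minimal reproduction of the mechanism with the stock ZooKeeper client, not RegionServerTracker's actual code (connect string and znode path reused from the log; parent znodes are assumed to exist):

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralExpirySketch {
  public static void main(String[] args) throws Exception {
    // "Region server" session: owns an ephemeral znode.
    ZooKeeper server = new ZooKeeper("127.0.0.1:60680", 30_000, e -> { });
    // "Master" session: watches that znode for deletion.
    ZooKeeper master = new ZooKeeper("127.0.0.1:60680", 30_000, e -> {
      if (e.getType() == EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration " + e.getPath());
      }
    });
    String path = "/hbase/rs/7c69a60bd8f6,36867,1732416587422";
    server.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    master.exists(path, true); // arm a NodeDeleted watch
    server.close();            // session ends, ZooKeeper removes the ephemeral node
    Thread.sleep(3_000);       // let the master's watcher fire
    master.close();
  }
}
```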
2024-11-24T02:49:50,900 DEBUG [M:0;7c69a60bd8f6:34451 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:49:50,900 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416588340 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416588340,5,FailOnTimeoutGroup] 2024-11-24T02:49:50,900 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416588338 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416588338,5,FailOnTimeoutGroup] 2024-11-24T02:49:50,900 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:49:50,901 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:49:50,901 DEBUG [M:0;7c69a60bd8f6:34451 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:49:50,901 INFO [M:0;7c69a60bd8f6:34451 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:49:50,901 INFO [M:0;7c69a60bd8f6:34451 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:49:50,902 INFO [M:0;7c69a60bd8f6:34451 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:49:50,902 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:49:50,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:49:50,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:49:50,910 DEBUG [M:0;7c69a60bd8f6:34451 {}] zookeeper.ZKUtil(347): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:49:50,910 WARN [M:0;7c69a60bd8f6:34451 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:49:50,911 INFO [M:0;7c69a60bd8f6:34451 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/.lastflushedseqids 2024-11-24T02:49:50,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741843_1019 (size=127) 2024-11-24T02:49:50,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741843_1019 (size=127) 2024-11-24T02:49:50,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741843_1019 (size=127) 2024-11-24T02:49:50,931 INFO [M:0;7c69a60bd8f6:34451 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:49:50,931 INFO [M:0;7c69a60bd8f6:34451 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:49:50,932 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:49:50,932 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:49:50,932 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:49:50,932 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:49:50,932 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:49:50,932 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-24T02:49:50,952 DEBUG [M:0;7c69a60bd8f6:34451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bf9523bf3e846ad8bc6f5aa3f2ce12f is 82, key is hbase:meta,,1/info:regioninfo/1732416589214/Put/seqid=0 2024-11-24T02:49:50,957 INFO [RS:2;7c69a60bd8f6:34975 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:49:50,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,957 INFO [RS:2;7c69a60bd8f6:34975 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,34975,1732416587480; zookeeper connection closed. 
2024-11-24T02:49:50,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34975-0x1016abf41270003, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,959 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1a058beb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1a058beb 2024-11-24T02:49:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741844_1020 (size=5672) 2024-11-24T02:49:50,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741844_1020 (size=5672) 2024-11-24T02:49:50,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741844_1020 (size=5672) 2024-11-24T02:49:50,965 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bf9523bf3e846ad8bc6f5aa3f2ce12f 2024-11-24T02:49:50,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,989 INFO [RS:0;7c69a60bd8f6:46185 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:49:50,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46185-0x1016abf41270001, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:50,989 INFO [RS:0;7c69a60bd8f6:46185 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,46185,1732416587362; zookeeper connection closed. 
2024-11-24T02:49:50,993 DEBUG [M:0;7c69a60bd8f6:34451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/69229cb5256c44f8965dfa7cec88cc6f is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416589952/Put/seqid=0 2024-11-24T02:49:50,994 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a176f1b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a176f1b 2024-11-24T02:49:50,994 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-24T02:49:51,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741845_1021 (size=6440) 2024-11-24T02:49:51,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741845_1021 (size=6440) 2024-11-24T02:49:51,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741845_1021 (size=6440) 2024-11-24T02:49:51,004 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/69229cb5256c44f8965dfa7cec88cc6f 2024-11-24T02:49:51,036 DEBUG [M:0;7c69a60bd8f6:34451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd1a6ec01c574acaa10f32a65341a4f2 is 69, key is 7c69a60bd8f6,34975,1732416587480/rs:state/1732416588356/Put/seqid=0 2024-11-24T02:49:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741846_1022 (size=5294) 2024-11-24T02:49:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741846_1022 (size=5294) 2024-11-24T02:49:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741846_1022 (size=5294) 2024-11-24T02:49:51,060 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd1a6ec01c574acaa10f32a65341a4f2 2024-11-24T02:49:51,070 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bf9523bf3e846ad8bc6f5aa3f2ce12f as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5bf9523bf3e846ad8bc6f5aa3f2ce12f 2024-11-24T02:49:51,078 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5bf9523bf3e846ad8bc6f5aa3f2ce12f, entries=8, sequenceid=72, filesize=5.5 K 2024-11-24T02:49:51,080 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/69229cb5256c44f8965dfa7cec88cc6f as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/69229cb5256c44f8965dfa7cec88cc6f 2024-11-24T02:49:51,088 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/69229cb5256c44f8965dfa7cec88cc6f, entries=8, sequenceid=72, filesize=6.3 K 2024-11-24T02:49:51,089 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd1a6ec01c574acaa10f32a65341a4f2 as hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd1a6ec01c574acaa10f32a65341a4f2 2024-11-24T02:49:51,101 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35433/user/jenkins/test-data/f25e9f6c-67c2-1c09-c2dc-b37f125e887d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd1a6ec01c574acaa10f32a65341a4f2, entries=3, sequenceid=72, filesize=5.2 K 2024-11-24T02:49:51,103 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=72, compaction requested=false 2024-11-24T02:49:51,109 INFO [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:49:51,109 DEBUG [M:0;7c69a60bd8f6:34451 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416590932Disabling compacts and flushes for region at 1732416590932Disabling writes for close at 1732416590932Obtaining lock to block concurrent updates at 1732416590932Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416590932Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1732416590933 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732416590933Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416590934 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416590950 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416590951 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416590973 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416590992 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416590992Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416591013 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416591036 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416591036Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2003ad72: reopening flushed file at 1732416591068 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a96d8f3: reopening flushed file at 1732416591078 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41328a32: reopening flushed file at 1732416591088 (+10 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=72, compaction requested=false at 1732416591103 (+15 ms)Writing region close event to WAL at 1732416591109 (+6 ms)Closed at 1732416591109 2024-11-24T02:49:51,110 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:51,111 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:51,111 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:51,111 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:51,111 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:49:51,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40851 is added to blk_1073741830_1006 (size=32686) 2024-11-24T02:49:51,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741830_1006 (size=32686) 2024-11-24T02:49:51,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35293 is added to blk_1073741830_1006 (size=32686) 2024-11-24T02:49:51,116 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:49:51,116 INFO [M:0;7c69a60bd8f6:34451 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
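[Annotation] Every flushed file above is first written under the region's .tmp directory and then published by a rename into the column-family directory (the "Committing .tmp/... as ..." records a few entries up). The underlying write-then-rename pattern, sketched with the plain Hadoop FileSystem API (a generic sketch, not HRegionFileSystem's exact logic; names are illustrative):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  /** Publish a finished flush file by renaming it out of the .tmp directory. */
  static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    // HDFS rename is atomic, so readers see either no file or a complete one;
    // a half-written file never appears under the column-family directory.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```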
2024-11-24T02:49:51,116 INFO [M:0;7c69a60bd8f6:34451 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34451 2024-11-24T02:49:51,117 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:49:51,230 INFO [M:0;7c69a60bd8f6:34451 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:49:51,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:51,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34451-0x1016abf41270000, quorum=127.0.0.1:60680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:49:51,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66690135{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:49:51,233 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55fefad1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:49:51,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:49:51,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f04037c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:49:51,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7aaeb6cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,STOPPED} 2024-11-24T02:49:51,235 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:49:51,235 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:49:51,235 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1178940285-172.17.0.2-1732416584320 (Datanode Uuid b0ffa974-e89e-47e0-bf1a-46bf3b9cf3bd) service to localhost/127.0.0.1:35433 2024-11-24T02:49:51,235 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:49:51,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data5/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:49:51,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data6/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:49:51,236 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:49:51,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b12351d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:49:51,239 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1841cfc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:49:51,239 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:49:51,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@895c7e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:49:51,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15be761f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,STOPPED} 2024-11-24T02:49:51,241 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:49:51,241 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1178940285-172.17.0.2-1732416584320 (Datanode Uuid b0b35602-1ef4-4d5d-8ddb-26cfc4202eb0) service to localhost/127.0.0.1:35433 2024-11-24T02:49:51,241 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:49:51,241 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T02:49:51,241 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data3/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:51,242 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data4/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:51,242 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T02:49:51,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4095cdc1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-24T02:49:51,245 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63241e03{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:51,245 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:51,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23e049cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:51,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a7e23a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:51,247 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-24T02:49:51,247 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-24T02:49:51,247 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-24T02:49:51,247 WARN [BP-1178940285-172.17.0.2-1732416584320 heartbeating to localhost/127.0.0.1:35433 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1178940285-172.17.0.2-1732416584320 (Datanode Uuid c14c80c1-6001-4dd0-a345-cb9c1d96fcd2) service to localhost/127.0.0.1:35433
2024-11-24T02:49:51,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data1/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:51,248 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/cluster_e3ba48ec-b8c0-db74-3962-1ace868bf839/data/data2/current/BP-1178940285-172.17.0.2-1732416584320 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-24T02:49:51,248 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-24T02:49:51,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34f8b36d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-24T02:49:51,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5136e662{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-24T02:49:51,255 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-24T02:49:51,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4141bee1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-24T02:49:51,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fca1d0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3a3b69e9-b15f-c8cc-be43-f65f8860f049/hadoop.log.dir/,STOPPED}
2024-11-24T02:49:51,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-24T02:49:51,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-24T02:49:51,308 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=174 (was 110) - Thread LEAK? -, OpenFileDescriptor=524 (was 374) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=394 (was 366) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=10456 (was 10859)