2024-11-07 12:14:53,768 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d1c005e
2024-11-07 12:14:53,794 main DEBUG Took 0.021837 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-07 12:14:53,794 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-07 12:14:53,795 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-07 12:14:53,796 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-07 12:14:53,798 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,841 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-07 12:14:53,858 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,862 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,863 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,863 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,865 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,866 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,867 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,868 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,869 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,870 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,871 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,872 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,872 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,873 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,874 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,874 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,875 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,875 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,876 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,877 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-07 12:14:53,877 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,878 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-07 12:14:53,880 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-07 12:14:53,882 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-07 12:14:53,885 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-07 12:14:53,886 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-07 12:14:53,888 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-07 12:14:53,889 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-07 12:14:53,902 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-07 12:14:53,909 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-07 12:14:53,914 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-07 12:14:53,915 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-07 12:14:53,916 main DEBUG createAppenders(={Console})
2024-11-07 12:14:53,918 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d1c005e initialized
2024-11-07 12:14:53,918 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d1c005e
2024-11-07 12:14:53,919 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d1c005e OK.
2024-11-07 12:14:53,923 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-07 12:14:53,924 main DEBUG OutputStream closed
2024-11-07 12:14:53,927 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-07 12:14:53,927 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-07 12:14:53,928 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6f46426d OK
2024-11-07 12:14:54,060 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-07 12:14:54,063 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-07 12:14:54,065 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-07 12:14:54,066 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-07 12:14:54,067 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-07 12:14:54,068 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-07 12:14:54,068 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-07 12:14:54,069 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-07 12:14:54,069 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-07 12:14:54,070 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-07 12:14:54,070 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-07 12:14:54,071 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-07 12:14:54,071 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-07 12:14:54,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-07 12:14:54,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-07 12:14:54,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-07 12:14:54,073 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-07 12:14:54,074 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-07 12:14:54,077 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-07 12:14:54,078 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-07 12:14:54,078 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-07 12:14:54,079 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-07T12:14:54,106 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-07 12:14:54,111 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-07 12:14:54,112 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-07T12:14:54,456 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0
2024-11-07T12:14:54,507 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f, deleteOnExit=true
2024-11-07T12:14:54,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/test.cache.data in system properties and HBase conf
2024-11-07T12:14:54,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.tmp.dir in system properties and HBase conf
2024-11-07T12:14:54,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir in system properties and HBase conf
2024-11-07T12:14:54,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-07T12:14:54,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-07T12:14:54,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-07T12:14:54,650 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-07T12:14:54,784 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-07T12:14:54,790 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-07T12:14:54,791 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-07T12:14:54,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-07T12:14:54,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-07T12:14:54,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-07T12:14:54,795 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-07T12:14:54,795 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-07T12:14:54,796 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-07T12:14:54,797 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-07T12:14:54,798 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/nfs.dump.dir in system properties and HBase conf
2024-11-07T12:14:54,798 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/java.io.tmpdir in system properties and HBase conf
2024-11-07T12:14:54,799 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-07T12:14:54,800 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-07T12:14:54,801 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-07T12:14:56,098 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-07T12:14:56,198 INFO [Time-limited test {}] log.Log(170): Logging initialized @3319ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-07T12:14:56,310 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:14:56,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:14:56,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:14:56,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:14:56,472 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-07T12:14:56,493 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:14:56,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fcea601{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:14:56,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2428f4dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:14:56,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eaa7387{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/java.io.tmpdir/jetty-localhost-39589-hadoop-hdfs-3_4_1-tests_jar-_-any-8316548028417834206/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:14:56,772 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a51423c{HTTP/1.1, (http/1.1)}{localhost:39589}
2024-11-07T12:14:56,772 INFO [Time-limited test {}] server.Server(415): Started @3895ms
2024-11-07T12:14:57,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:14:57,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:14:57,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:14:57,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:14:57,464 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-07T12:14:57,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15576943{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:14:57,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73696fa8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:14:57,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@265f851e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/java.io.tmpdir/jetty-localhost-35135-hadoop-hdfs-3_4_1-tests_jar-_-any-17910909462833839739/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:14:57,588 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c108191{HTTP/1.1, (http/1.1)}{localhost:35135}
2024-11-07T12:14:57,588 INFO [Time-limited test {}] server.Server(415): Started @4710ms
2024-11-07T12:14:57,651 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-07T12:14:57,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:14:57,842 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:14:57,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:14:57,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:14:57,849 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-07T12:14:57,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fed20b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:14:57,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a863ca9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:14:58,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7aa37120{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/java.io.tmpdir/jetty-localhost-41037-hadoop-hdfs-3_4_1-tests_jar-_-any-8831985434750945333/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:14:58,004 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@926802c{HTTP/1.1, (http/1.1)}{localhost:41037}
2024-11-07T12:14:58,004 INFO [Time-limited test {}] server.Server(415): Started @5126ms
2024-11-07T12:14:58,007 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-07T12:14:58,063 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:14:58,070 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:14:58,076 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:14:58,076 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:14:58,076 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-07T12:14:58,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ed07bd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:14:58,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24758d60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:14:58,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62da31d0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/java.io.tmpdir/jetty-localhost-34415-hadoop-hdfs-3_4_1-tests_jar-_-any-9710581595260524430/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:14:58,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f02298f{HTTP/1.1, (http/1.1)}{localhost:34415}
2024-11-07T12:14:58,220 INFO [Time-limited test {}] server.Server(415): Started @5343ms
2024-11-07T12:14:58,223 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-07T12:14:59,043 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data3/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,043 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data1/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,043 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data2/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,043 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data4/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,075 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-07T12:14:59,075 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-07T12:14:59,114 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data5/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,114 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data6/current/BP-972601932-172.17.0.2-1730981695590/current, will proceed with Du for space computation calculation,
2024-11-07T12:14:59,123 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91ee03e29ae668d7 with lease ID 0xf6fe6915e82bec13: Processing first storage report for DS-a5e60a7c-8a0b-4c0a-bc12-4b27774bc2ad from datanode DatanodeRegistration(127.0.0.1:32897, datanodeUuid=73adb480-ce5d-42af-ae47-ce3a4d13bc85, infoPort=39615, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91ee03e29ae668d7 with lease ID 0xf6fe6915e82bec13: from storage DS-a5e60a7c-8a0b-4c0a-bc12-4b27774bc2ad node DatanodeRegistration(127.0.0.1:32897, datanodeUuid=73adb480-ce5d-42af-ae47-ce3a4d13bc85, infoPort=39615, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeba72e042f7b66ee with lease ID 0xf6fe6915e82bec12: Processing first storage report for DS-72ab5020-bfed-4ab2-87cf-9285a3b067ec from datanode DatanodeRegistration(127.0.0.1:34323, datanodeUuid=d2563bbf-23c1-4a30-bde2-16fcc338cd16, infoPort=41615, infoSecurePort=0, ipcPort=34481, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeba72e042f7b66ee with lease ID 0xf6fe6915e82bec12: from storage DS-72ab5020-bfed-4ab2-87cf-9285a3b067ec node DatanodeRegistration(127.0.0.1:34323, datanodeUuid=d2563bbf-23c1-4a30-bde2-16fcc338cd16, infoPort=41615, infoSecurePort=0, ipcPort=34481, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91ee03e29ae668d7 with lease ID 0xf6fe6915e82bec13: Processing first storage report for DS-cb1176ab-6ddb-4198-bc3d-d4bee6c95c0d from datanode DatanodeRegistration(127.0.0.1:32897, datanodeUuid=73adb480-ce5d-42af-ae47-ce3a4d13bc85, infoPort=39615, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91ee03e29ae668d7 with lease ID 0xf6fe6915e82bec13: from storage DS-cb1176ab-6ddb-4198-bc3d-d4bee6c95c0d node DatanodeRegistration(127.0.0.1:32897, datanodeUuid=73adb480-ce5d-42af-ae47-ce3a4d13bc85, infoPort=39615, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,125 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeba72e042f7b66ee with lease ID 0xf6fe6915e82bec12: Processing first storage report for DS-2d9a0217-ac5b-45cf-90a8-4844d2210217 from datanode DatanodeRegistration(127.0.0.1:34323, datanodeUuid=d2563bbf-23c1-4a30-bde2-16fcc338cd16, infoPort=41615, infoSecurePort=0, ipcPort=34481, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeba72e042f7b66ee with lease ID 0xf6fe6915e82bec12: from storage DS-2d9a0217-ac5b-45cf-90a8-4844d2210217 node DatanodeRegistration(127.0.0.1:34323, datanodeUuid=d2563bbf-23c1-4a30-bde2-16fcc338cd16, infoPort=41615, infoSecurePort=0, ipcPort=34481, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,137 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-07T12:14:59,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde2f6b2e2763ea2e with lease ID 0xf6fe6915e82bec14: Processing first storage report for DS-6d9f3156-9819-4939-aab9-07f566ecbfb9 from datanode DatanodeRegistration(127.0.0.1:40081, datanodeUuid=fdd1d357-0485-4b1a-bc24-8e77d0151f2a, infoPort=33295, infoSecurePort=0, ipcPort=45783, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde2f6b2e2763ea2e with lease ID 0xf6fe6915e82bec14: from storage DS-6d9f3156-9819-4939-aab9-07f566ecbfb9 node DatanodeRegistration(127.0.0.1:40081, datanodeUuid=fdd1d357-0485-4b1a-bc24-8e77d0151f2a, infoPort=33295, infoSecurePort=0, ipcPort=45783, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,143 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde2f6b2e2763ea2e with lease ID 0xf6fe6915e82bec14: Processing first storage report for DS-c04ea0f4-a54b-4cdf-84a3-fa1cd9ed6635 from datanode DatanodeRegistration(127.0.0.1:40081, datanodeUuid=fdd1d357-0485-4b1a-bc24-8e77d0151f2a, infoPort=33295, infoSecurePort=0, ipcPort=45783, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590)
2024-11-07T12:14:59,143 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde2f6b2e2763ea2e with lease ID 0xf6fe6915e82bec14: from storage DS-c04ea0f4-a54b-4cdf-84a3-fa1cd9ed6635 node DatanodeRegistration(127.0.0.1:40081, datanodeUuid=fdd1d357-0485-4b1a-bc24-8e77d0151f2a, infoPort=33295, infoSecurePort=0, ipcPort=45783, storageInfo=lv=-57;cid=testClusterID;nsid=549366304;c=1730981695590), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-07T12:14:59,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0
2024-11-07T12:14:59,298 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-07T12:14:59,351 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=159, OpenFileDescriptor=392, MaxFileDescriptor=1048576, SystemLoadAverage=295, ProcessCount=11, AvailableMemoryMB=5016
2024-11-07T12:14:59,353 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-07T12:14:59,353 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1143): NOT STARTING DFS
2024-11-07T12:14:59,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/zookeeper_0, clientPort=63737, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-07T12:14:59,439 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63737
2024-11-07T12:14:59,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:14:59,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:14:59,519 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:14:59,520 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:14:59,559 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-322704881_22 at /127.0.0.1:44784 [Receiving block BP-972601932-172.17.0.2-1730981695590:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:32897:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44784 dst: /127.0.0.1:32897
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:14:59,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32897 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-07T12:14:59,976 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-07T12:14:59,986 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4 with version=8
2024-11-07T12:14:59,987 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/hbase-staging
2024-11-07T12:15:00,141 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-07T12:15:00,459 INFO [Time-limited test {}] client.ConnectionUtils(129): master/8dcb99358a44:0 server-side Connection retries=45
2024-11-07T12:15:00,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:00,488 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:00,489 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-07T12:15:00,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:00,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-07T12:15:00,701 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-07T12:15:00,781 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-07T12:15:00,797 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-07T12:15:00,802 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-07T12:15:00,838 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 1034 (auto-detected)
2024-11-07T12:15:00,840 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-07T12:15:00,874 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35579
2024-11-07T12:15:00,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:00,894 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:00,919 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35579 connecting to ZooKeeper ensemble=127.0.0.1:63737
2024-11-07T12:15:01,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355790x0, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-07T12:15:01,030 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35579-0x101168241c60000 connected
2024-11-07T12:15:01,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:01,155 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:01,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-07T12:15:01,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35579
2024-11-07T12:15:01,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35579
2024-11-07T12:15:01,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35579
2024-11-07T12:15:01,175 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35579
2024-11-07T12:15:01,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35579
2024-11-07T12:15:01,185 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4, hbase.cluster.distributed=false
2024-11-07T12:15:01,291 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45
2024-11-07T12:15:01,291 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,292 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-07T12:15:01,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-07T12:15:01,296 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-07T12:15:01,300 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-07T12:15:01,302 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37957
2024-11-07T12:15:01,305 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-07T12:15:01,322 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-07T12:15:01,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,336 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37957 connecting to ZooKeeper ensemble=127.0.0.1:63737
2024-11-07T12:15:01,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379570x0, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:01,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379570x0, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-07T12:15:01,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379570x0, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:01,368 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379570x0, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-07T12:15:01,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37957-0x101168241c60001 connected
2024-11-07T12:15:01,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37957
2024-11-07T12:15:01,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37957
2024-11-07T12:15:01,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37957
2024-11-07T12:15:01,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37957
2024-11-07T12:15:01,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37957
2024-11-07T12:15:01,433 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45
2024-11-07T12:15:01,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,433 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-07T12:15:01,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-07T12:15:01,434 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-07T12:15:01,435 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-07T12:15:01,442 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44967
2024-11-07T12:15:01,443 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-07T12:15:01,451 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-07T12:15:01,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,459 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,467 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44967 connecting to ZooKeeper ensemble=127.0.0.1:63737
2024-11-07T12:15:01,482 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:449670x0, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:01,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449670x0, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-07T12:15:01,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:01,487 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44967-0x101168241c60002 connected
2024-11-07T12:15:01,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-07T12:15:01,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44967
2024-11-07T12:15:01,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44967
2024-11-07T12:15:01,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44967
2024-11-07T12:15:01,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44967
2024-11-07T12:15:01,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44967
2024-11-07T12:15:01,540 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45
2024-11-07T12:15:01,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,541 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-07T12:15:01,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-07T12:15:01,541 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-07T12:15:01,542 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-07T12:15:01,542 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-07T12:15:01,559 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33781
2024-11-07T12:15:01,560 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-07T12:15:01,584 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-07T12:15:01,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:01,595 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33781 connecting to ZooKeeper ensemble=127.0.0.1:63737
2024-11-07T12:15:01,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337810x0, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:01,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337810x0, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-07T12:15:01,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:01,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-07T12:15:01,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33781-0x101168241c60003 connected
2024-11-07T12:15:01,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33781
2024-11-07T12:15:01,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33781
2024-11-07T12:15:01,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33781
2024-11-07T12:15:01,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33781
2024-11-07T12:15:01,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33781
2024-11-07T12:15:01,661 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/8dcb99358a44,35579,1730981700133
2024-11-07T12:15:01,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-07T12:15:01,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-07T12:15:01,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-07T12:15:01,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-07T12:15:01,685 DEBUG [M:0;8dcb99358a44:35579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8dcb99358a44:35579
2024-11-07T12:15:01,688 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8dcb99358a44,35579,1730981700133
2024-11-07T12:15:01,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:01,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:01,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received
ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:01,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:01,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:01,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:01,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:01,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:01,743 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:15:01,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:15:01,745 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8dcb99358a44,35579,1730981700133 from backup master directory 2024-11-07T12:15:01,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8dcb99358a44,35579,1730981700133 2024-11-07T12:15:01,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:01,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:01,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:01,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:01,755 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T12:15:01,756 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8dcb99358a44,35579,1730981700133
2024-11-07T12:15:01,758 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-07T12:15:01,770 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-07T12:15:01,894 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:01,894 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:01,908 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-322704881_22 at /127.0.0.1:38372 [Receiving block BP-972601932-172.17.0.2-1730981695590:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:34323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38372 dst: /127.0.0.1:34323
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:01,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_-9223372036854775776_1004 (size=42)
2024-11-07T12:15:01,930 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-07T12:15:01,933 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/hbase.id with ID: 166392db-47d1-4fc0-ac4d-42ddfd73109a
2024-11-07T12:15:02,015 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-07T12:15:02,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,085 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:02,085 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:02,103 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-322704881_22 at /127.0.0.1:38388 [Receiving block BP-972601932-172.17.0.2-1730981695590:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:34323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38388 dst: /127.0.0.1:34323
java.io.InterruptedIOException: Interrupted receiveBlock
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,120 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-07T12:15:02,146 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-07T12:15:02,148 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-07T12:15:02,163 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-07T12:15:02,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_-9223372036854775789_1002 (size=7)
2024-11-07T12:15:02,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40081 is added to blk_-9223372036854775788_1002 (size=7)
2024-11-07T12:15:02,220 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-07T12:15:02,221 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-07T12:15:02,224 WARN [IPC Server handler 1 on default port 42781 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-07T12:15:02,225 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-07T12:15:02,232 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSTableDescriptors(563): Failed write hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189; retrying up to 10 times
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:559) ~[classes/:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:635) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:412) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,255 WARN [IPC Server handler 3 on default port 42781 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-07T12:15:02,256 WARN [IPC Server handler 3 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-07T12:15:02,256 WARN [IPC Server handler 3 on default port 42781 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-07T12:15:02,256 WARN [IPC Server handler 3 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-07T12:15:02,258 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSTableDescriptors(563): Failed write hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189; retrying up to 10 times
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:559) ~[classes/:?]
    at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:635) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:412) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,290 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:02,291 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-07T12:15:02,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-322704881_22 at /127.0.0.1:38416 [Receiving block BP-972601932-172.17.0.2-1730981695590:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:34323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38416 dst: /127.0.0.1:34323
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34323 is added to blk_-9223372036854775744_1008 (size=1189)
2024-11-07T12:15:02,324 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-07T12:15:02,340 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189
2024-11-07T12:15:02,343 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189
2024-11-07T12:15:02,366 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store
2024-11-07T12:15:02,385 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly.
2024-11-07T12:15:02,385 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-07T12:15:02,385 WARN [IPC Server handler 1 on default port 42781 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-07T12:15:02,385 WARN [IPC Server handler 1 on default port 42781 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-07T12:15:02,386 ERROR [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(2474): Failed to become active master org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.regioninfo could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoFileContent(HRegionFileSystem.java:815) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:906) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:868) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.createRegionOnFileSystem(HRegionFileSystem.java:936) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.createHRegion(HRegion.java:7126) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:242) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:412) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,387 ERROR [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(3167): ***** ABORTING master 8dcb99358a44,35579,1730981700133: Unhandled exception. Starting shutdown. *****
org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.regioninfo could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation.
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoFileContent(HRegionFileSystem.java:815) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:906) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.writeRegionInfoOnFilesystem(HRegionFileSystem.java:868) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionFileSystem.createRegionOnFileSystem(HRegionFileSystem.java:936) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.createHRegion(HRegion.java:7126) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:242) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:412) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T12:15:02,388 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,35579,1730981700133' *****
2024-11-07T12:15:02,388 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegionServer(2575): STOPPED: Stopped by master/8dcb99358a44:0:becomeActiveMaster
2024-11-07T12:15:02,443 INFO [M:0;8dcb99358a44:35579 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35579
2024-11-07T12:15:02,446 DEBUG [M:0;8dcb99358a44:35579 {}] master.HMaster(3157): Abort called but aborted=true, stopped=true
2024-11-07T12:15:02,446 INFO [M:0;8dcb99358a44:35579 {}] regionserver.HRegionServer(1221): aborting server 8dcb99358a44,35579,1730981700133
2024-11-07T12:15:02,447 INFO [M:0;8dcb99358a44:35579 {}] regionserver.HRegionServer(1250): stopping server 8dcb99358a44,35579,1730981700133; all regions closed.
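Context for the abort above: RS-3-2-1024k is the built-in HDFS Reed-Solomon erasure coding policy with 3 data units, 2 parity units, and a 1024 KiB cell size. A DFSStripedOutputStream needs at least the 3 data-unit datanodes to allocate a block group, so a 3-datanode mini cluster has no slack: once all 3 nodes land on the excluded-node list, addBlock can place only 2 of the 3 required targets and the write fails, which is what kills the master's bootstrap write of .regioninfo. A minimal sketch of turning the policy on for a directory is below; it assumes fs.defaultFS points at an HDFS cluster, and the /ec-data path and class name are illustrative, not this test's actual layout.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EnableRs32Policy {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS resolves to HDFS, so the cast below succeeds.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // RS-3-2-1024k ships with Hadoop 3 but is disabled by default.
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");
      Path dir = new Path("/ec-data");  // illustrative directory
      dfs.mkdirs(dir);
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
      // Files created under /ec-data are now written as striped block
      // groups: 3 data + 2 parity cells of 1024 KiB each.
      System.out.println("policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
    }
  }
}

The shell equivalents are "hdfs ec -enablePolicy -policy RS-3-2-1024k" followed by "hdfs ec -setPolicy -path /ec-data -policy RS-3-2-1024k".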
2024-11-07T12:15:02,447 INFO [M:0;8dcb99358a44:35579 {}] hbase.ChoreService(370): Chore service for: master/8dcb99358a44:0 had [] on shutdown
2024-11-07T12:15:02,447 DEBUG [M:0;8dcb99358a44:35579 {}] master.HMaster(1733): Stopping service threads
2024-11-07T12:15:02,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:02,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:02,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:02,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:02,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,655 DEBUG [M:0;8dcb99358a44:35579 {}] zookeeper.ZKUtil(347): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-07T12:15:02,655 WARN [M:0;8dcb99358a44:35579 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-07T12:15:02,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:02,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:02,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:02,672 DEBUG [M:0;8dcb99358a44:35579 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/8dcb99358a44,35579,1730981700133 already deleted, retry=false
2024-11-07T12:15:02,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
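The watcher traffic above is the standard pattern HBase uses to track the active master: every process keeps a ZooKeeper watch on /hbase/master, a NodeDeleted event fires when the aborting master removes its ephemeral znode, and the "znode data == null" WARN is the benign race where the znode is already gone by the time it is re-read. A minimal sketch of the same read-with-watch pattern against a raw ZooKeeper client follows; the quorum address mirrors the log, while the class name and session timeout are illustrative.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeProbe {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) ->
        System.out.println("Received ZooKeeper Event, type=" + e.getType()
            + ", state=" + e.getState() + ", path=" + e.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63737", 30000, watcher);
    try {
      Stat stat = new Stat();
      // getData re-registers the watch, so a later delete of the znode
      // fires a NodeDeleted event like the ones logged above.
      byte[] data = zk.getData("/hbase/master", watcher, stat);
      System.out.println("/hbase/master has " + data.length + " bytes");
    } catch (KeeperException.NoNodeException e) {
      // Same race as the WARN above: the master deleted its znode between
      // the NodeDeleted event and this read, so there is no data to return.
      System.out.println("no active master registered");
    } finally {
      zk.close();
    }
  }
}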
2024-11-07T12:15:02,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:02,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:02,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35579-0x101168241c60000, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:02,785 INFO [M:0;8dcb99358a44:35579 {}] regionserver.HRegionServer(1307): Exiting; stopping=8dcb99358a44,35579,1730981700133; zookeeper connection closed.
2024-11-07T12:15:05,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40081 is added to blk_-9223372036854775772_1004 (size=42)
2024-11-07T12:15:05,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32897 is added to blk_-9223372036854775773_1004 (size=42)
2024-11-07T12:15:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32897 is added to blk_-9223372036854775741_1008 (size=1189)
2024-11-07T12:15:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40081 is added to blk_-9223372036854775740_1008 (size=1189)
2024-11-07T12:15:05,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32897 is added to blk_-9223372036854775756_1006 (size=196)
2024-11-07T12:15:05,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40081 is added to blk_-9223372036854775757_1006 (size=196)
2024-11-07T12:15:29,227 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
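The last DEBUG line above comes from HBaseTestingUtility's FsDatasetAsyncDiskServiceFixer, which reflectively reaches into the datanode's FsDatasetAsyncDiskService; on Hadoop releases newer than 3.2.3/3.3.4 the private threadGroup field it looks for no longer exists, so the lookup fails with NoSuchFieldException and the fixer just logs it and moves on (see HBASE-27595). A sketch of that probe-and-degrade reflection pattern is below; the class and field used in main are stand-ins, not the real hbase-server internals.

import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {
  // Returns the named declared field, or null when this build of the target
  // class no longer declares it -- the same probe-and-degrade shape as the
  // fixer's lookup.
  static Field findField(Class<?> clazz, String name) {
    try {
      Field f = clazz.getDeclaredField(name);
      f.setAccessible(true);
      return f;
    } catch (NoSuchFieldException e) {
      System.out.println("NoSuchFieldException: " + name
          + "; field not present in this version");
      return null;
    }
  }

  public static void main(String[] args) {
    // java.lang.Thread stands in for FsDatasetAsyncDiskService here; it has
    // no field named "threadGroup", so this exercises the fallback path.
    Field f = findField(Thread.class, "threadGroup");
    System.out.println(f == null ? "field absent, degrading gracefully" : "field present");
  }
}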
Process Thread Dump: Thread dump because: Master not active after 30000ms
248 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.lang.ref.ReferenceQueue$Lock@574974aa
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 7
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d813fe6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 3
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 384
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 4
  Waiting on java.util.concurrent.CountDownLatch$Sync@58d18956
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 20 (process reaper):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 16
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 84
  Waited count: 401
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.JVMClusterUtil.waitForEvent(JVMClusterUtil.java:220)
    app//org.apache.hadoop.hbase.util.JVMClusterUtil.startup(JVMClusterUtil.java:177)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.startup(LocalHBaseCluster.java:407)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.init(MiniHBaseCluster.java:250)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.<init>(MiniHBaseCluster.java:110)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.startMiniHBaseCluster(HBaseTestingUtility.java:1185)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:1152)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:1106)
    app//org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.setUp(TestHBaseWalOnEC.java:107)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
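Thread 22 (Time-limited test) shows where this dump comes from: JVMClusterUtil.waitForEvent gave up waiting for an active master after 30000 ms and called Threads.printThreadInfo, which walks the JMX thread beans. A rough equivalent using only the JDK's ThreadMXBean, printing the same fields as the entries here, is sketched below; the class name is illustrative and no HBase dependencies are assumed.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class DumpThreads {
  public static void main(String[] args) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    // false/false skips monitor and synchronizer details, like a plain dump.
    ThreadInfo[] infos = mx.dumpAllThreads(false, false);
    System.out.println(infos.length + " active threads");
    for (ThreadInfo ti : infos) {
      System.out.println("Thread " + ti.getThreadId() + " (" + ti.getThreadName() + "):");
      System.out.println("  State: " + ti.getThreadState());
      // Per-thread contention counters, the source of the Blocked count /
      // Waited count lines in the dump above.
      System.out.println("  Blocked count: " + ti.getBlockedCount());
      System.out.println("  Waited count: " + ti.getWaitedCount());
      System.out.println("  Stack:");
      for (StackTraceElement ste : ti.getStackTrace()) {
        System.out.println("    " + ste);
      }
    }
  }
}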
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.lang.ref.ReferenceQueue$Lock@33d43240
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@7df1e6cf
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 33 (Timer for 'NameNode' metrics system):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70d42f3a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 72
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-5-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp977763867-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp977763867-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp977763867-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp977763867-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp977763867-41-acceptor-0@1a59d9a4-ServerConnector@6a51423c{HTTP/1.1, (http/1.1)}{localhost:39589}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp977763867-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp977763867-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp977763867-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-2bd78111-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 2
  Waited count: 55
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47b17bba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 42781):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@237a2c0d):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@782ca272):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 12
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3405
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f30981a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 42781):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 60
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 42781):
  State: TIMED_WAITING
  Blocked count: 23
  Waited count: 59
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 42781):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 55
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 42781):
  State: TIMED_WAITING
  Blocked count: 16
  Waited count: 56
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 42781):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 57
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-11-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3888c7a4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 18
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@28213cbf):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@632a3c7f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7412db1f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(565345681)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 76 (StorageLocationChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 3
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 77 (StorageLocationChecker thread 1):
  State: TIMED_WAITING
  Blocked count: 4
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 79 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 86 (pool-17-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp2110397901-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp2110397901-88-acceptor-0@1d2ba29-ServerConnector@1c108191{HTTP/1.1, (http/1.1)}{localhost:35135}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp2110397901-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp2110397901-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-652fabec-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@73520b8):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 68
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 44645):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a9ceee7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781):
  State: TIMED_WAITING
  Blocked count: 144
  Waited count: 42
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-19-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5417ba00):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 44645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 42
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 44645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 49
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 44645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 44645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 35
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 44645):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 38
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 109 (StorageLocationChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 110 (StorageLocationChecker thread 1):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 120 (pool-25-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp1898383070-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp1898383070-122-acceptor-0@6d302bb3-ServerConnector@926802c{HTTP/1.1, (http/1.1)}{localhost:41037}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp1898383070-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp1898383070-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-53dfae5-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 116 (IPC Client (370908447) connection to localhost/127.0.0.1:42781 from jenkins):
  State: TIMED_WAITING
  Blocked count: 37
  Waited count: 38
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:42781):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 41
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 126 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59519e03):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 68
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 129 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 130 (IPC Server idle connection scanner for port 34481):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 132 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 135 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1174c758
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 136 (BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781):
  State: TIMED_WAITING
  Blocked count: 96
  Waited count: 44
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (pool-28-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@217aaf13):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 131 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 128 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 138 (IPC Server handler 0 on default port 34481):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 41
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 139 (IPC Server handler 1 on default port 34481):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 39
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 140 (IPC Server handler 2 on default port 34481):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 44
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 3 on default port 34481):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 4 on default port 34481):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (StorageLocationChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 144 (StorageLocationChecker thread 1):
  State: TIMED_WAITING
  Blocked count: 3
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 152 (pool-35-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 153 (qtp987109862-153):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8500445ff8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 154 (qtp987109862-154-acceptor-0@e3ae15e-ServerConnector@1f02298f{HTTP/1.1, (http/1.1)}{localhost:34415}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 155 (qtp987109862-155):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 156 (qtp987109862-156):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (Session-HouseKeeper-7235d37-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 158 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6bc79822):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 67
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 162 (IPC Server idle connection scanner for port 45783):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 164 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 167 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dfb15ed
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 168 (BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781):
  State: TIMED_WAITING
  Blocked count: 29
  Waited count: 40
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 169 (pool-37-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b59729d):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 160 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 170 (IPC Server handler 0 on default port 45783):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 43
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 171 (IPC Server handler 1 on default port 45783):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 49
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 172 (IPC Server handler 2 on default port 45783):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 50
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 173 (IPC Server handler 3 on default port 45783):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 41
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 174 (IPC Server handler 4 on default port 45783):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 36
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data3)):
  State: TIMED_WAITING
  Blocked count: 26
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data1)):
  State: TIMED_WAITING
  Blocked count: 28
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data2)):
  State: TIMED_WAITING
  Blocked count: 27
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data4)):
  State: TIMED_WAITING
  Blocked count: 24
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 195 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 196 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data2/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data3/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data4/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data1/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 205 (ForkJoinPool-2-worker-1):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.ForkJoinPool@ed68d3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 206 (ForkJoinPool-2-worker-2):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.ForkJoinPool@ed68d3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 207 (ForkJoinPool-2-worker-3):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.ForkJoinPool@ed68d3d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 208 (ForkJoinPool-2-worker-4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 209 (DataNode DiskChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 210 (pool-22-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 213 (DataNode DiskChecker thread 0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 214 (pool-14-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 215 (DataNode DiskChecker thread 1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@4362ce89[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 221 (java.util.concurrent.ThreadPoolExecutor$Worker@4818ac31[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 222 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data5)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data6)):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data5/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data6/current/BP-972601932-172.17.0.2-1730981695590):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (DataNode DiskChecker thread 0): State: TIMED_WAITING Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (pool-32-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (DataNode DiskChecker thread 1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@19a2da69[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 244 (LeaseRenewer:jenkins@localhost:42781): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 252 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 251 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 15 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 253 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:63737): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 250 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 254 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 255 (SyncThread:0): State: WAITING Blocked count: 1 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43b0901b Stack: 
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 256 (ProcessThread(sid:0 cport:63737):):
  State: WAITING
  Blocked count: 0
  Waited count: 83
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@707177cb
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 257 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 85
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc7d0ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 258 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6bf71ace
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 276 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 279 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 280 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 282 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 283 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 284 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 285 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 287 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 288 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 289 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 290 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 1
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 291 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 1
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 292 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 293 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 294 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 0
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c00671d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 308 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 309 (Time-limited test.LruBlockCache.EvictionThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957)
Thread 311 (LruBlockCacheStatsExecutor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 313 (MobFileCache #0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 314 (Time-limited test-SendThread(127.0.0.1:63737)):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 315 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 4
  Waited count: 8
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7281dd69
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 316 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 10
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63aaec3c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 317 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37957):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6039d2db
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 318 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37957):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@22ea0354
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 319 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37957):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@5efce8f5
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 320 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37957):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a03341a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 321 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37957):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44828c8b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 322 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37957): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44828c8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 323 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=37957): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3680c7ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 324 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=37957): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@f023a89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 325 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=37957): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c527c97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 326 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=37957): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c645142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 330 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 333 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 336 (Time-limited test-SendThread(127.0.0.1:63737)):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 337 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 2
  Waited count: 8
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@631a3b48
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 338 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 8
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25d3dcca
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 339 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@38b25760
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 340 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@13e5cbb
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 341 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@334cf7af
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 342 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24880
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 343 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226107af
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 344 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226107af
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 345 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@51c95cdc
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 346 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@56c28c5
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 347 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@1f70e6a0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 348 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=44967):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@3506d69e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
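The RpcServer handler threads above (and the further handler pools below) are all idle, parked in Semaphore.acquire inside FastPathRpcHandler.getCallRunner: each handler waits on its own semaphore until a dispatcher hands it a call directly instead of routing it through the shared queue. A minimal sketch of that fast-path handoff pattern, with hypothetical names rather than HBase's actual implementation:

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Semaphore;

// Sketch: an idle worker parks on a private semaphore and receives work by
// direct handoff, which is why the dump shows WAITING on Semaphore$NonfairSync.
public class FastPathWorker extends Thread {
  private final Semaphore ready = new Semaphore(0); // worker parks here when idle
  private volatile Runnable task;                   // set by the dispatcher before release()
  private final ConcurrentLinkedDeque<FastPathWorker> idle;

  FastPathWorker(ConcurrentLinkedDeque<FastPathWorker> idle) { this.idle = idle; }

  // Dispatcher side: pop an idle worker and hand the task straight to it.
  static boolean tryDispatch(ConcurrentLinkedDeque<FastPathWorker> idle, Runnable r) {
    FastPathWorker w = idle.poll();
    if (w == null) return false; // nobody idle; caller would fall back to a queue
    w.task = r;
    w.ready.release();           // wakes the worker out of acquire()
    return true;
  }

  @Override public void run() {
    try {
      while (true) {
        idle.push(this);   // advertise this worker as idle
        ready.acquire();   // the Semaphore.acquire frame in the dump
        task.run();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // exit on shutdown
    }
  }
}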
Thread 352 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 353 (Time-limited test.LruBlockCache.EvictionThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957)
Thread 355 (LruBlockCacheStatsExecutor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 357 (MobFileCache #0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 358 (Time-limited test-SendThread(127.0.0.1:63737)):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 359 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@511058a8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 360 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 9
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6d845977
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@44b3ce1e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 362 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@18fc23aa
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 363 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4bfafd05
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 364 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa5aec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 365 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ee3a753
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 366 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ee3a753
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 367 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@277b2e56
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 368 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@3bf5fd69
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 369 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6d0eedc1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 370 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33781):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@2e14160c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 375 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
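Thread 375 above shows the simplest idle pattern in the dump: a plain sleep loop inside TaskMonitor$MonitorRunnable. A housekeeping daemon of that shape, sketched with hypothetical names (not the HBase source):

// Sketch: a periodic monitor thread; the Thread.sleep frame is exactly what
// Thread 375 shows while it waits between housekeeping passes.
public class MonitorLoop implements Runnable {
  private final long intervalMs;
  private volatile boolean stopped;

  public MonitorLoop(long intervalMs) { this.intervalMs = intervalMs; }
  public void stop() { stopped = true; }

  @Override public void run() {
    while (!stopped) {
      try {
        Thread.sleep(intervalMs);  // TIMED_WAITING in the dump
        pruneExpiredTasks();       // hypothetical housekeeping step
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;                    // shut down promptly when interrupted
      }
    }
  }

  private void pruneExpiredTasks() { /* drop entries older than some threshold */ }
}

Started with setDaemon(true), a thread like this never blocks JVM exit, which is why such monitors survive harmlessly into teardown.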
Thread 377 (master/8dcb99358a44:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 379 (master/8dcb99358a44:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 381 (StripedRead-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 382 (org.apache.hadoop.hdfs.PeerCache@16d31f31):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 413 (StripedBlockReconstruction-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 417 (stripedRead-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 418 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 296
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 448 (StripedBlockReconstruction-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 449 (StripedBlockReconstruction-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
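The idle threads in this stretch fall into three recognizable stack shapes, which makes pool types easy to identify from a dump: scheduled pools park in ScheduledThreadPoolExecutor$DelayedWorkQueue.take (Threads 377/379), cached pools do a timed SynchronousQueue.poll (the StripedRead/stripedRead threads), and pools whose workers may time out do a timed LinkedBlockingQueue.poll (the StripedBlockReconstruction threads). A small sketch that reproduces all three signatures under a thread dump:

import java.util.concurrent.*;

public class IdleSignatures {
  public static void main(String[] args) throws Exception {
    // Idle worker parks in ScheduledThreadPoolExecutor$DelayedWorkQueue.take.
    ScheduledExecutorService stats = Executors.newScheduledThreadPool(1);
    stats.scheduleAtFixedRate(() -> System.out.println("tick"), 5, 5, TimeUnit.SECONDS);

    // newCachedThreadPool hands work off through a SynchronousQueue, so its
    // idle worker shows SynchronousQueue.poll with the 60s keep-alive timeout.
    ExecutorService cached = Executors.newCachedThreadPool();
    cached.submit(() -> { });

    // With allowCoreThreadTimeOut, idle workers do a timed
    // LinkedBlockingQueue.poll instead of an untimed take.
    ThreadPoolExecutor recon =
        new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    recon.allowCoreThreadTimeOut(true);
    recon.submit(() -> { });

    Thread.sleep(1_000); // take a jstack here to compare the three stack shapes
    stats.shutdownNow();
    cached.shutdownNow();
    recon.shutdownNow();
  }
}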
Thread 454 (stripedRead-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 457 (StripedBlockReconstruction-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 461 (stripedRead-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-07T12:15:31,725 ERROR [Time-limited test {}] hbase.MiniHBaseCluster(255): Error starting cluster
java.lang.RuntimeException: Master not active after 30000ms
  at org.apache.hadoop.hbase.util.JVMClusterUtil.waitForEvent(JVMClusterUtil.java:221) ~[classes/:?]
  at org.apache.hadoop.hbase.util.JVMClusterUtil.startup(JVMClusterUtil.java:177) ~[classes/:?]
  at org.apache.hadoop.hbase.LocalHBaseCluster.startup(LocalHBaseCluster.java:407) ~[classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.MiniHBaseCluster.init(MiniHBaseCluster.java:250) ~[test-classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.MiniHBaseCluster.<init>(MiniHBaseCluster.java:110) ~[test-classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniHBaseCluster(HBaseTestingUtility.java:1185) ~[test-classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:1152) ~[test-classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:1106) ~[test-classes/:2.7.0-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.setUp(TestHBaseWalOnEC.java:107) ~[test-classes/:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.run(ParentRunner.java:413) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.Suite.runChild(Suite.java:128) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.Suite.runChild(Suite.java:27) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
  at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
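The failure itself is a timeout: JVMClusterUtil.waitForEvent gave up after 30000ms because the master never reported itself active, and everything that follows is teardown. The shape of such a check is a deadline loop over a condition; a minimal sketch assuming a plain BooleanSupplier (not the actual HBase code):

import java.util.function.BooleanSupplier;

public final class WaitUtil {
  // Poll `condition` until it holds or `timeoutMs` elapses, mirroring the
  // "Master not active after 30000ms" failure mode above.
  public static void waitForEvent(long timeoutMs, String event, BooleanSupplier condition) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new RuntimeException("Master not " + event + " after " + timeoutMs + "ms");
      }
      try {
        Thread.sleep(100); // back off between checks
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted waiting for master to be " + event, e);
      }
    }
  }
}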
2024-11-07T12:15:31,726 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-07T12:15:31,726 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,37957,1730981701289' *****
2024-11-07T12:15:31,726 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:31,726 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,44967,1730981701432' *****
2024-11-07T12:15:31,726 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:31,727 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,33781,1730981701539' *****
2024-11-07T12:15:31,727 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:31,727 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-07T12:15:31,727 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-11-07T12:15:31,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62da31d0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:31,734 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f02298f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:31,734 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:31,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24758d60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:31,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ed07bd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:31,739 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:31,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:31,739 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:31,739 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-972601932-172.17.0.2-1730981695590 (Datanode Uuid fdd1d357-0485-4b1a-bc24-8e77d0151f2a) service to localhost/127.0.0.1:42781
2024-11-07T12:15:31,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data5/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data6/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:31,743 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7aa37120{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:31,744 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@926802c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:31,744 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:31,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a863ca9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:31,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fed20b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:31,746 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:31,746 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:31,746 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-972601932-172.17.0.2-1730981695590 (Datanode Uuid d2563bbf-23c1-4a30-bde2-16fcc338cd16) service to localhost/127.0.0.1:42781
2024-11-07T12:15:31,746 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:31,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data3/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data4/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,748 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:31,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@265f851e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:31,751 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c108191{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:31,751 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:31,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73696fa8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:31,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15576943{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:31,753 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:31,753 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:31,753 WARN [BP-972601932-172.17.0.2-1730981695590 heartbeating to localhost/127.0.0.1:42781 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-972601932-172.17.0.2-1730981695590 (Datanode Uuid 73adb480-ce5d-42af-ae47-ce3a4d13bc85) service to localhost/127.0.0.1:42781
2024-11-07T12:15:31,753 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:31,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data1/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/cluster_ff6fcda1-4aaa-0136-7e73-55f15224e83f/dfs/data/data2/current/BP-972601932-172.17.0.2-1730981695590 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:31,754 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:31,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eaa7387{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:15:31,766 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a51423c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:31,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:31,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2428f4dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:31,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fcea601{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:31,775 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-07T12:15:31,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-11-07T12:15:31,818 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=105 (was 159), OpenFileDescriptor=373 (was 392), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=290 (was 295), ProcessCount=11 (was 11), AvailableMemoryMB=4145 (was 5016)
2024-11-07T12:15:31,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=105, OpenFileDescriptor=373, MaxFileDescriptor=1048576, SystemLoadAverage=290, ProcessCount=11, AvailableMemoryMB=4146
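The ResourceChecker lines bracket each test with a snapshot of threads, file descriptors, load, and memory, so a leak shows up as a before/after delta (here the failed run released 54 threads on shutdown, 159 down to 105). A stripped-down sketch of that bookkeeping; HBase's ResourceChecker tracks more dimensions than this:

import java.lang.management.ManagementFactory;

// Snapshot a few process-level resources before a test, report the delta after.
public class ResourceSnapshot {
  private final int threadCount = ManagementFactory.getThreadMXBean().getThreadCount();
  private final long availableMemoryMB = Runtime.getRuntime().freeMemory() >> 20;

  public static ResourceSnapshot before(String test) {
    ResourceSnapshot snap = new ResourceSnapshot();
    System.out.printf("before: %s Thread=%d, AvailableMemoryMB=%d%n",
        test, snap.threadCount, snap.availableMemoryMB);
    return snap;
  }

  public void after(String test) {
    ResourceSnapshot now = new ResourceSnapshot();
    System.out.printf("after: %s Thread=%d (was %d), AvailableMemoryMB=%d (was %d)%n",
        test, now.threadCount, threadCount, now.availableMemoryMB, availableMemoryMB);
    // A thread count that keeps growing across tests usually means a leaked
    // executor, server, or event loop that was never shut down.
  }
}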
2024-11-07T12:15:31,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-07T12:15:31,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.log.dir so I do NOT create it in target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255
2024-11-07T12:15:31,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8d1e50b3-3fdd-78ef-49e3-a9f951961ca0/hadoop.tmp.dir so I do NOT create it in target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255
2024-11-07T12:15:31,824 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15, deleteOnExit=true
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/test.cache.data in system properties and HBase conf
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.tmp.dir in system properties and HBase conf
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir in system properties and HBase conf
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-07T12:15:31,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
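The StartMiniClusterOption string logged above is built by the test before it calls startMiniCluster. In an HBase 2.x test, setup matching those logged fields looks roughly like this sketch (the builder method names follow the fields in the log; treat the exact API as approximate):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSetup {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  public static void main(String[] args) throws Exception {
    // Mirrors the logged option: 1 master, 3 region servers, 3 data nodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    UTIL.startMiniCluster(option); // starts DFS and ZK first, then the HBase cluster
    try {
      // ... exercise the cluster through UTIL.getConnection() here ...
    } finally {
      UTIL.shutdownMiniCluster(); // produces "Shutting down minicluster" lines like those above
    }
  }
}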
2024-11-07T12:15:31,826 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-07T12:15:31,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/nfs.dump.dir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/java.io.tmpdir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-07T12:15:31,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-07T12:15:31,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
2024-11-07T12:15:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:44967-0x101168241c60002, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:37957-0x101168241c60001, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
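Each ZKWatcher line here is a session-level event (type=None, state=Disconnected) delivered to every watcher while the old cluster's ZooKeeper quorum goes away; HBase logs the event and ignores it, since Disconnected is recoverable noise while only Expired kills the session. A minimal ZooKeeper watcher doing the same triage (a sketch, not the ZKWatcher source):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Session-state events arrive with type None. Disconnected during shutdown is
// expected; Expired means the session is gone for good and must be rebuilt.
public class ShutdownTolerantWatcher implements Watcher {
  @Override public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.None) {
      switch (event.getState()) {
        case Disconnected:
          System.out.println("Received Disconnected from ZooKeeper, ignoring");
          break;
        case Expired:
          // the session cannot recover; a real client would reconnect here
          break;
        default:
          break;
      }
    } else {
      // znode events (NodeCreated, NodeDataChanged, ...) would be handled here
    }
  }
}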
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
2024-11-07T12:15:31,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): regionserver:33781-0x101168241c60003, quorum=127.0.0.1:63737, baseZNode=/hbase Received Disconnected from ZooKeeper, ignoring
2024-11-07T12:15:32,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:15:32,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:15:32,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:15:32,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:15:32,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-07T12:15:32,174 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:15:32,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17455d4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:15:32,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd6d6cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:15:32,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a25ecf1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/java.io.tmpdir/jetty-localhost-32849-hadoop-hdfs-3_4_1-tests_jar-_-any-15877887654052438351/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:15:32,279 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ea4011e{HTTP/1.1, (http/1.1)}{localhost:32849}
2024-11-07T12:15:32,279 INFO [Time-limited test {}] server.Server(415): Started @39402ms
2024-11-07T12:15:32,497 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-07T12:15:32,502 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-07T12:15:32,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-07T12:15:32,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-07T12:15:32,504 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-07T12:15:32,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3806bae2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,AVAILABLE}
2024-11-07T12:15:32,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@753a7704{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-07T12:15:32,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ba3a5d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/java.io.tmpdir/jetty-localhost-46721-hadoop-hdfs-3_4_1-tests_jar-_-any-2975831600616848037/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:32,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fa17b83{HTTP/1.1, (http/1.1)}{localhost:46721}
2024-11-07T12:15:32,599 INFO [Time-limited test {}] server.Server(415): Started @39722ms
2024-11-07T12:15:32,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:15:32,638 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:15:32,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:15:32,641 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:15:32,641 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T12:15:32,641 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43876932{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:15:32,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52790ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:15:32,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15aff75{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/java.io.tmpdir/jetty-localhost-38887-hadoop-hdfs-3_4_1-tests_jar-_-any-359725367238275406/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:15:32,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9338a9d{HTTP/1.1, (http/1.1)}{localhost:38887} 2024-11-07T12:15:32,736 INFO [Time-limited test {}] server.Server(415): Started @39859ms 2024-11-07T12:15:32,738 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:15:32,775 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T12:15:32,779 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T12:15:32,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T12:15:32,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T12:15:32,780 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-07T12:15:32,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43d8d8de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,AVAILABLE} 2024-11-07T12:15:32,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1de7ceda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T12:15:32,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ed08ade{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/java.io.tmpdir/jetty-localhost-42703-hadoop-hdfs-3_4_1-tests_jar-_-any-3606377470650038630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T12:15:32,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54f72b24{HTTP/1.1, (http/1.1)}{localhost:42703} 2024-11-07T12:15:32,877 INFO [Time-limited test {}] server.Server(415): Started @40000ms 2024-11-07T12:15:32,878 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T12:15:33,319 WARN [Thread-350 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data1/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,319 WARN [Thread-351 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data2/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,339 WARN [Thread-293 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:15:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c07bf64269069d3 with lease ID 0xf23bf9af324f0edf: Processing first storage report for DS-9735151f-bdb4-4b5c-8055-929b41b34105 from datanode DatanodeRegistration(127.0.0.1:34265, datanodeUuid=b0f7ecdf-16dd-427d-9f52-89b89d5db43b, infoPort=35009, infoSecurePort=0, ipcPort=43533, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c07bf64269069d3 with lease ID 0xf23bf9af324f0edf: from storage DS-9735151f-bdb4-4b5c-8055-929b41b34105 node DatanodeRegistration(127.0.0.1:34265, datanodeUuid=b0f7ecdf-16dd-427d-9f52-89b89d5db43b, infoPort=35009, infoSecurePort=0, ipcPort=43533, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c07bf64269069d3 with lease ID 0xf23bf9af324f0edf: Processing first storage report for DS-e725fa0d-2f02-4308-8f7d-5586a7c2045f from datanode DatanodeRegistration(127.0.0.1:34265, datanodeUuid=b0f7ecdf-16dd-427d-9f52-89b89d5db43b, infoPort=35009, infoSecurePort=0, ipcPort=43533, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c07bf64269069d3 with lease ID 0xf23bf9af324f0edf: from storage DS-e725fa0d-2f02-4308-8f7d-5586a7c2045f node DatanodeRegistration(127.0.0.1:34265, datanodeUuid=b0f7ecdf-16dd-427d-9f52-89b89d5db43b, infoPort=35009, infoSecurePort=0, ipcPort=43533, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,739 WARN [Thread-364 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data3/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,739 WARN [Thread-365 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data4/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,758 WARN [Thread-316 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:15:33,761 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f58e5c6c9be506c with lease ID 0xf23bf9af324f0ee0: Processing first storage report for DS-9bf8c78f-4cdb-4c83-8ec4-74c69841fed7 from datanode DatanodeRegistration(127.0.0.1:34531, datanodeUuid=1fa3454c-4d8f-4868-9178-23bbe0f4f656, infoPort=37763, infoSecurePort=0, ipcPort=43757, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,761 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f58e5c6c9be506c with lease ID 0xf23bf9af324f0ee0: from storage DS-9bf8c78f-4cdb-4c83-8ec4-74c69841fed7 node DatanodeRegistration(127.0.0.1:34531, datanodeUuid=1fa3454c-4d8f-4868-9178-23bbe0f4f656, infoPort=37763, infoSecurePort=0, ipcPort=43757, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,761 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f58e5c6c9be506c with lease ID 0xf23bf9af324f0ee0: Processing first storage report for DS-c9ad0ed3-771b-45f5-803c-0a7c7bee2594 from datanode DatanodeRegistration(127.0.0.1:34531, datanodeUuid=1fa3454c-4d8f-4868-9178-23bbe0f4f656, infoPort=37763, infoSecurePort=0, ipcPort=43757, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f58e5c6c9be506c with lease ID 0xf23bf9af324f0ee0: from storage DS-c9ad0ed3-771b-45f5-803c-0a7c7bee2594 node DatanodeRegistration(127.0.0.1:34531, datanodeUuid=1fa3454c-4d8f-4868-9178-23bbe0f4f656, infoPort=37763, infoSecurePort=0, ipcPort=43757, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,868 WARN [Thread-375 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data5/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,871 WARN [Thread-376 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data6/current/BP-1589988083-172.17.0.2-1730981731858/current, will proceed with Du for space computation calculation, 2024-11-07T12:15:33,889 WARN [Thread-339 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T12:15:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d08d8c792683f35 with lease ID 0xf23bf9af324f0ee1: Processing first storage report for DS-b88e4570-5bc6-4927-95af-6ed89cb8be81 from datanode DatanodeRegistration(127.0.0.1:37239, datanodeUuid=bf63df42-278d-49b9-9fd7-4548a8045599, infoPort=44911, infoSecurePort=0, ipcPort=33413, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d08d8c792683f35 with lease ID 0xf23bf9af324f0ee1: from storage DS-b88e4570-5bc6-4927-95af-6ed89cb8be81 node DatanodeRegistration(127.0.0.1:37239, datanodeUuid=bf63df42-278d-49b9-9fd7-4548a8045599, infoPort=44911, infoSecurePort=0, ipcPort=33413, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d08d8c792683f35 with lease ID 0xf23bf9af324f0ee1: Processing first storage report for DS-445604b6-7c62-45d3-a7b5-f710869e2d55 from datanode DatanodeRegistration(127.0.0.1:37239, datanodeUuid=bf63df42-278d-49b9-9fd7-4548a8045599, infoPort=44911, infoSecurePort=0, ipcPort=33413, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858) 2024-11-07T12:15:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d08d8c792683f35 with lease ID 0xf23bf9af324f0ee1: from storage DS-445604b6-7c62-45d3-a7b5-f710869e2d55 node DatanodeRegistration(127.0.0.1:37239, datanodeUuid=bf63df42-278d-49b9-9fd7-4548a8045599, infoPort=44911, infoSecurePort=0, ipcPort=33413, storageInfo=lv=-57;cid=testClusterID;nsid=1816791128;c=1730981731858), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T12:15:33,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255 2024-11-07T12:15:33,941 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/zookeeper_0, clientPort=52285, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T12:15:33,942 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52285 2024-11-07T12:15:33,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
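The records above show the harness bringing up the supporting mini clusters: a MiniZooKeeperCluster on an ephemeral client port and three HDFS DataNodes reporting their storages. As a minimal sketch of how a test drives this with HBaseTestingUtility -- the class and methods are real branch-2 API, but the exact calls this particular test makes are an assumption read off the log:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // HBaseTestingUtility picks scratch directories and ephemeral ports at
        // runtime, which is why the paths and port numbers in the log vary per run.
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniZKCluster();   // MiniZooKeeperCluster, random clientPort
        util.startMiniDFSCluster(3); // three DataNodes, matching the block reports above
        try {
          // test body would run against util.getConfiguration() here
        } finally {
          util.shutdownMiniCluster(); // tears HBase, DFS and ZK down again
        }
      }
    }
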
2024-11-07T12:15:33,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:33,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:15:33,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:15:33,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741825_1001 (size=7) 2024-11-07T12:15:33,967 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4 with version=8 2024-11-07T12:15:33,967 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:42781/user/jenkins/test-data/e9c8b8f4-fd78-acb0-c9f5-a2284facf3e4/hbase-staging 2024-11-07T12:15:33,969 INFO [Time-limited test {}] client.ConnectionUtils(129): master/8dcb99358a44:0 server-side Connection retries=45 2024-11-07T12:15:33,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:33,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:33,969 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:15:33,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:33,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:15:33,970 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:15:33,970 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:15:33,970 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34129 2024-11-07T12:15:33,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:33,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:33,975 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process 
identifier=master:34129 connecting to ZooKeeper ensemble=127.0.0.1:52285 2024-11-07T12:15:34,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:341290x0, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:15:34,029 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34129-0x1011682c8a80000 connected 2024-11-07T12:15:34,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:15:34,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:15:34,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:15:34,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34129 2024-11-07T12:15:34,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34129 2024-11-07T12:15:34,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34129 2024-11-07T12:15:34,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34129 2024-11-07T12:15:34,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34129 2024-11-07T12:15:34,105 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4, hbase.cluster.distributed=false 2024-11-07T12:15:34,128 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 
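The repeated "Set watcher on znode that does not yet exist" lines come from ZKUtil registering existence watches, so the master is notified when znodes such as /hbase/master or /hbase/running are created later. A generic sketch of that pattern using the plain org.apache.zookeeper API; the connect string mirrors the ephemeral port above and is illustrative only:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistenceWatchSketch implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        // Connection-state events arrive with type=None, as in the ZKWatcher(609)
        // lines; node events (NodeCreated, NodeDeleted, NodeChildrenChanged) carry a path.
        System.out.println("type=" + event.getType() + ", state=" + event.getState()
            + ", path=" + event.getPath());
      }

      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52285", 30000, new ExistenceWatchSketch());
        // exists() registers a watch even when the znode is absent, so the watcher
        // fires with NodeCreated once another participant creates /hbase/master.
        zk.exists("/hbase/master", true);
      }
    }
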
2024-11-07T12:15:34,128 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:15:34,129 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36801 2024-11-07T12:15:34,129 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:15:34,130 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:15:34,131 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,133 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,137 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36801 connecting to ZooKeeper ensemble=127.0.0.1:52285 2024-11-07T12:15:34,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368010x0, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:15:34,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:368010x0, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:15:34,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36801-0x1011682c8a80001 connected 2024-11-07T12:15:34,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:15:34,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:15:34,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36801 2024-11-07T12:15:34,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36801 2024-11-07T12:15:34,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36801 2024-11-07T12:15:34,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36801 2024-11-07T12:15:34,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36801 2024-11-07T12:15:34,186 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45 2024-11-07T12:15:34,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,187 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:15:34,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:15:34,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:15:34,187 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:15:34,188 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44347 2024-11-07T12:15:34,188 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:15:34,189 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:15:34,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,194 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44347 connecting to ZooKeeper ensemble=127.0.0.1:52285 2024-11-07T12:15:34,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443470x0, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:15:34,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443470x0, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:15:34,209 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44347-0x1011682c8a80002 connected 2024-11-07T12:15:34,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:15:34,211 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:15:34,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44347 2024-11-07T12:15:34,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44347 2024-11-07T12:15:34,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44347 2024-11-07T12:15:34,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44347 2024-11-07T12:15:34,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44347 2024-11-07T12:15:34,267 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/8dcb99358a44:0 server-side Connection retries=45 2024-11-07T12:15:34,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,267 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T12:15:34,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T12:15:34,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T12:15:34,268 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T12:15:34,268 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T12:15:34,269 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46073 2024-11-07T12:15:34,270 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T12:15:34,271 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T12:15:34,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,278 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46073 connecting to ZooKeeper ensemble=127.0.0.1:52285 2024-11-07T12:15:34,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460730x0, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T12:15:34,287 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46073-0x1011682c8a80003 connected 2024-11-07T12:15:34,287 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T12:15:34,288 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T12:15:34,288 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T12:15:34,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46073 2024-11-07T12:15:34,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46073 2024-11-07T12:15:34,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46073 2024-11-07T12:15:34,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46073 2024-11-07T12:15:34,290 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46073 2024-11-07T12:15:34,291 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/8dcb99358a44,34129,1730981733968 2024-11-07T12:15:34,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,301 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8dcb99358a44,34129,1730981733968 2024-11-07T12:15:34,305 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8dcb99358a44:34129 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:34,311 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,312 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:15:34,312 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8dcb99358a44,34129,1730981733968 from backup master directory 2024-11-07T12:15:34,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8dcb99358a44,34129,1730981733968 2024-11-07T12:15:34,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T12:15:34,319 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
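Each of the three region servers above instantiates the same RPC queue set (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) with small handler counts, consistent with a test configuration that dials the defaults down. A hedged sketch of the relevant knobs -- the keys are standard HBase configuration properties, but the specific values are assumptions inferred from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Few handlers keep a mini cluster light; production defaults are much higher.
        conf.setInt("hbase.regionserver.handler.count", 3);   // -> handlerCount=3 above
        // Point clients at the MiniZooKeeperCluster from the log (ephemeral port).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 52285);
        System.out.println(conf.get("hbase.regionserver.handler.count"));
      }
    }
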
2024-11-07T12:15:34,320 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8dcb99358a44,34129,1730981733968 2024-11-07T12:15:34,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T12:15:34,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:15:34,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:15:34,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741826_1002 (size=42) 2024-11-07T12:15:34,340 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/hbase.id with ID: 1b3b0453-9a8b-4748-8dfe-d563bd9ba056 2024-11-07T12:15:34,359 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T12:15:34,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:34,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:15:34,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:15:34,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741827_1003 (size=196) 2024-11-07T12:15:34,381 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T12:15:34,381 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-07T12:15:34,382 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:15:34,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:15:34,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:15:34,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741828_1004 (size=1189) 2024-11-07T12:15:34,397 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store 
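The master:store descriptor printed above (families info, proc, rs, state) is assembled internally by the master region code. Purely as an illustration of how such attributes map onto the public HBase 2.x builder API -- this is not how the master actually constructs it -- the 'info' family could be reconstructed as:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
        System.out.println(store);
      }
    }
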
2024-11-07T12:15:34,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:15:34,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:15:34,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741829_1005 (size=34) 2024-11-07T12:15:34,412 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-07T12:15:34,413 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:15:34,414 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-07T12:15:34,414 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:15:34,414 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:15:34,414 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-07T12:15:34,414 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-07T12:15:34,414 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
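The close journal above (wait without time limit for the close lock, disable updates, close the region) follows the classic read/write-lock shutdown pattern: regular operations hold the lock in read mode, while close takes it in write mode and so waits for in-flight work to drain. Illustrative only -- a generic sketch of the pattern, not HRegion's actual field layout:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

      void mutate() {
        closeLock.readLock().lock();  // many concurrent operations share this side
        try { /* append to WAL, update memstore ... */ }
        finally { closeLock.readLock().unlock(); }
      }

      void close() {
        // "Waiting without time limit for close lock" ...
        // "Acquired close lock ... after waiting 0 ms" when uncontended
        closeLock.writeLock().lock();
        try { /* updates disabled for region; flush and close stores */ }
        finally { closeLock.writeLock().unlock(); }
      }
    }
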
2024-11-07T12:15:34,414 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-07T12:15:34,416 WARN [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/.initializing 2024-11-07T12:15:34,416 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/WALs/8dcb99358a44,34129,1730981733968 2024-11-07T12:15:34,432 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8dcb99358a44%2C34129%2C1730981733968, suffix=, logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/WALs/8dcb99358a44,34129,1730981733968, archiveDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/oldWALs, maxLogs=10 2024-11-07T12:15:34,441 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8dcb99358a44%2C34129%2C1730981733968.1730981734439 2024-11-07T12:15:34,441 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-11-07T12:15:34,441 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 2024-11-07T12:15:34,460 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/WALs/8dcb99358a44,34129,1730981733968/8dcb99358a44%2C34129%2C1730981733968.1730981734439 2024-11-07T12:15:34,466 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:44911:44911),(127.0.0.1/127.0.0.1:37763:37763)] 2024-11-07T12:15:34,467 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:15:34,467 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:15:34,469 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:15:34,470 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:15:34,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-07T12:15:34,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-07T12:15:34,535 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T12:15:34,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-07T12:15:34,540 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T12:15:34,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-07T12:15:34,544 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T12:15:34,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-07T12:15:34,548 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T12:15:34,551 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,552 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,559 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-07T12:15:34,562 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T12:15:34,565 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T12:15:34,566 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73143301, jitterRate=0.08992011845111847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-07T12:15:34,571 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-07T12:15:34,572 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-07T12:15:34,594 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@540d976d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-07T12:15:34,620 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-11-07T12:15:34,629 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-07T12:15:34,629 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-07T12:15:34,631 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-07T12:15:34,632 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-11-07T12:15:34,636 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec
2024-11-07T12:15:34,636 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-07T12:15:34,656 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
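The records above show each column family of the master local region (info, proc, rs, state) being opened with identical compaction settings, and FlushLargeStoresPolicy falling back to region.getMemStoreFlushHeapSize divided by the family count because no per-family lower bound is configured: 134217728 / 4 = 33554432 bytes, which is both the "(32.0 M)" note and the flushSizeLowerBound=33554432 printed when the region opens. A minimal sketch of the knobs involved, assuming the standard HBase 2.x property names (the keys and values below are illustrative, not taken from this cluster's hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // These keys feed the CompactionConfiguration values logged above.
            conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
            conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period: 7 days in ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter 0.500000
            // FlushLargeStoresPolicy fallback seen in the log: flush size / family count.
            long memstoreFlushSize = 134217728L;  // 128 MiB
            int families = 4;                     // info, proc, rs, state
            System.out.println(memstoreFlushSize / families); // 33554432 (= 32 MiB)
        }
    }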
2024-11-07T12:15:34,665 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-07T12:15:34,675 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-11-07T12:15:34,677 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-07T12:15:34,678 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-07T12:15:34,686 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-11-07T12:15:34,688 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-07T12:15:34,691 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-07T12:15:34,700 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-11-07T12:15:34,701 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-07T12:15:34,711 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-11-07T12:15:34,722 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-07T12:15:34,728 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,737 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=8dcb99358a44,34129,1730981733968, sessionid=0x1011682c8a80000, setting cluster-up flag (Was=false)
2024-11-07T12:15:34,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,786 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-07T12:15:34,789 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8dcb99358a44,34129,1730981733968
2024-11-07T12:15:34,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:34,836 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-07T12:15:34,840 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8dcb99358a44,34129,1730981733968
2024-11-07T12:15:34,902 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;8dcb99358a44:44347
2024-11-07T12:15:34,902 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;8dcb99358a44:46073
2024-11-07T12:15:34,904 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1008): ClusterId : 1b3b0453-9a8b-4748-8dfe-d563bd9ba056
2024-11-07T12:15:34,904 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1008): ClusterId : 1b3b0453-9a8b-4748-8dfe-d563bd9ba056
2024-11-07T12:15:34,906 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-07T12:15:34,906 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-07T12:15:34,907 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8dcb99358a44:36801
2024-11-07T12:15:34,917 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-11-07T12:15:34,922 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-07T12:15:34,924 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
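The ZKUtil(444)/RecoverableZooKeeper(215) pairs above are routine first-startup probes: the master reads optional znodes (/hbase/balancer, /hbase/normalizer, the switch nodes, /hbase/snapshot-cleanup) and treats their absence as "use defaults", not as a failure. A sketch of that pattern with the plain Apache ZooKeeper client (HBase's internal ZKUtil is not a public API, so this is an illustration, not the actual implementation):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class OptionalZNodeSketch {
        // Returns the znode data, or null when the node does not exist --
        // mirroring "Unable to get data of znode ... (not necessarily an error)".
        static byte[] readIfPresent(ZooKeeper zk, String path)
                throws InterruptedException, KeeperException {
            try {
                return zk.getData(path, false, new Stat());
            } catch (KeeperException.NoNodeException e) {
                return null; // absent on a fresh cluster: caller falls back to its default
            }
        }
    }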
2024-11-07T12:15:34,925 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1008): ClusterId : 1b3b0453-9a8b-4748-8dfe-d563bd9ba056
2024-11-07T12:15:34,925 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-07T12:15:34,927 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-07T12:15:34,927 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-07T12:15:34,928 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8dcb99358a44,34129,1730981733968 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-07T12:15:34,931 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8dcb99358a44:0, corePoolSize=5, maxPoolSize=5
2024-11-07T12:15:34,931 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8dcb99358a44:0, corePoolSize=5, maxPoolSize=5
2024-11-07T12:15:34,931 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8dcb99358a44:0, corePoolSize=5, maxPoolSize=5
2024-11-07T12:15:34,931 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8dcb99358a44:0, corePoolSize=5, maxPoolSize=5
2024-11-07T12:15:34,932 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8dcb99358a44:0, corePoolSize=10, maxPoolSize=10
2024-11-07T12:15:34,932 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1
2024-11-07T12:15:34,932 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8dcb99358a44:0, corePoolSize=2, maxPoolSize=2
2024-11-07T12:15:34,932 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1
2024-11-07T12:15:34,933 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730981764933
2024-11-07T12:15:34,934 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-07T12:15:34,935 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-07T12:15:34,936 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-11-07T12:15:34,936 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-07T12:15:34,936 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-07T12:15:34,936 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-11-07T12:15:34,938 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-07T12:15:34,938 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-07T12:15:34,938 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-07T12:15:34,938 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-07T12:15:34,939 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:34,940 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,941 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-07T12:15:34,941 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-07T12:15:34,942 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-07T12:15:34,942 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-07T12:15:34,944 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-07T12:15:34,944 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-07T12:15:34,946 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.large.0-1730981734945,5,FailOnTimeoutGroup]
2024-11-07T12:15:34,946 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.small.0-1730981734946,5,FailOnTimeoutGroup]
2024-11-07T12:15:34,946 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:34,946 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-07T12:15:34,947 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:34,947 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:34,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741831_1007 (size=1039)
2024-11-07T12:15:34,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741831_1007 (size=1039)
2024-11-07T12:15:34,953 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-07T12:15:34,953 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-07T12:15:34,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741831_1007 (size=1039)
2024-11-07T12:15:34,954 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-07T12:15:34,954 DEBUG [RS:2;8dcb99358a44:46073 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@671c73aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-07T12:15:34,956 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-11-07T12:15:34,956 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4
2024-11-07T12:15:34,956 DEBUG [RS:2;8dcb99358a44:46073 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d7f5404, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8dcb99358a44/172.17.0.2:0
2024-11-07T12:15:34,960 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-11-07T12:15:34,960 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-11-07T12:15:34,960 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-11-07T12:15:34,961 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-07T12:15:34,962 DEBUG [RS:1;8dcb99358a44:44347 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@369e4775, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-07T12:15:34,962 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-07T12:15:34,962 DEBUG [RS:1;8dcb99358a44:44347 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55058db9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8dcb99358a44/172.17.0.2:0
2024-11-07T12:15:34,962 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-11-07T12:15:34,962 DEBUG [RS:0;8dcb99358a44:36801 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fd0649a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-07T12:15:34,962 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-11-07T12:15:34,962 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1090): About to register with Master.
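The FSTableDescriptors(133) record above prints the full hbase:meta descriptor: the MultiRowMutationEndpoint coprocessor plus the info, rep_barrier, and table families, all with ROW_INDEX_V1 encoding, ROWCOL bloom filters, and in-memory block caching. A hedged sketch of building a descriptor with the same family attributes through the public HBase 2.x client API, for a hypothetical user table named "example" (hbase:meta itself is created internally, as the log shows):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                    // VERSIONS => '3'
                    .setInMemory(true)                                    // IN_MEMORY => 'true'
                    .setBlocksize(8192)                                   // BLOCKSIZE => 8KB
                    .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                    .build())
                .build();
        }
    }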
2024-11-07T12:15:34,963 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(3073): reportForDuty to master=8dcb99358a44,34129,1730981733968 with isa=8dcb99358a44/172.17.0.2:46073, startcode=1730981734237
2024-11-07T12:15:34,963 DEBUG [RS:0;8dcb99358a44:36801 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d189c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8dcb99358a44/172.17.0.2:0
2024-11-07T12:15:34,963 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-11-07T12:15:34,963 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-11-07T12:15:34,963 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-11-07T12:15:34,964 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(3073): reportForDuty to master=8dcb99358a44,34129,1730981733968 with isa=8dcb99358a44/172.17.0.2:44347, startcode=1730981734185
2024-11-07T12:15:34,964 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(3073): reportForDuty to master=8dcb99358a44,34129,1730981733968 with isa=8dcb99358a44/172.17.0.2:36801, startcode=1730981734127
2024-11-07T12:15:34,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741832_1008 (size=32)
2024-11-07T12:15:34,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741832_1008 (size=32)
2024-11-07T12:15:34,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741832_1008 (size=32)
2024-11-07T12:15:34,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T12:15:34,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-07T12:15:34,975 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-07T12:15:34,975 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,976 DEBUG [RS:0;8dcb99358a44:36801 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-07T12:15:34,976 DEBUG [RS:2;8dcb99358a44:46073 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-07T12:15:34,976 DEBUG [RS:1;8dcb99358a44:44347 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-07T12:15:34,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T12:15:34,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-07T12:15:34,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-07T12:15:34,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T12:15:34,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-07T12:15:34,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-07T12:15:34,982 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:34,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T12:15:34,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740
2024-11-07T12:15:34,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740
2024-11-07T12:15:34,988 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-07T12:15:34,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-11-07T12:15:34,994 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T12:15:34,995 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60015433, jitterRate=-0.10570035874843597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-07T12:15:34,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-11-07T12:15:34,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-11-07T12:15:34,999 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-11-07T12:15:34,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-11-07T12:15:34,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-07T12:15:34,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-11-07T12:15:35,000 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-07T12:15:35,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-07T12:15:35,003 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-11-07T12:15:35,003 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-11-07T12:15:35,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-07T12:15:35,017 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-07T12:15:35,019 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-07T12:15:35,039 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37633, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-11-07T12:15:35,039 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33561, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-07T12:15:35,039 INFO [RS-EventLoopGroup-6-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48703, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-11-07T12:15:35,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 8dcb99358a44,46073,1730981734237
2024-11-07T12:15:35,048 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(486): Registering regionserver=8dcb99358a44,46073,1730981734237
2024-11-07T12:15:35,060 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 8dcb99358a44,36801,1730981734127
2024-11-07T12:15:35,060 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(486): Registering regionserver=8dcb99358a44,36801,1730981734127
2024-11-07T12:15:35,062 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 8dcb99358a44,44347,1730981734185
2024-11-07T12:15:35,063 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34129 {}] master.ServerManager(486): Registering regionserver=8dcb99358a44,44347,1730981734185
2024-11-07T12:15:35,063 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4
2024-11-07T12:15:35,063 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4
2024-11-07T12:15:35,063 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44787
2024-11-07T12:15:35,063 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44787
2024-11-07T12:15:35,063 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-11-07T12:15:35,063 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-11-07T12:15:35,065 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4
2024-11-07T12:15:35,065 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44787
2024-11-07T12:15:35,065 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-11-07T12:15:35,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-07T12:15:35,111 DEBUG [RS:1;8dcb99358a44:44347 {}] zookeeper.ZKUtil(111): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8dcb99358a44,44347,1730981734185
2024-11-07T12:15:35,111 DEBUG [RS:0;8dcb99358a44:36801 {}] zookeeper.ZKUtil(111): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8dcb99358a44,36801,1730981734127
2024-11-07T12:15:35,111 WARN [RS:1;8dcb99358a44:44347 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-07T12:15:35,111 DEBUG [RS:2;8dcb99358a44:46073 {}] zookeeper.ZKUtil(111): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8dcb99358a44,46073,1730981734237
2024-11-07T12:15:35,111 WARN [RS:0;8dcb99358a44:36801 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-07T12:15:35,111 INFO [RS:1;8dcb99358a44:44347 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-07T12:15:35,111 WARN [RS:2;8dcb99358a44:46073 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-07T12:15:35,111 INFO [RS:0;8dcb99358a44:36801 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-07T12:15:35,111 INFO [RS:2;8dcb99358a44:46073 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-07T12:15:35,111 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,46073,1730981734237
2024-11-07T12:15:35,111 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,36801,1730981734127
2024-11-07T12:15:35,111 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185
2024-11-07T12:15:35,114 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8dcb99358a44,46073,1730981734237]
2024-11-07T12:15:35,114 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8dcb99358a44,36801,1730981734127]
2024-11-07T12:15:35,114 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8dcb99358a44,44347,1730981734185]
2024-11-07T12:15:35,124 DEBUG [RS:2;8dcb99358a44:46073 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-11-07T12:15:35,124 DEBUG [RS:0;8dcb99358a44:36801 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-11-07T12:15:35,124 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
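Each region server above instantiates FSHLogProvider, the filesystem-backed write-ahead-log provider, and places its WALs under .../WALs/<server>,<port>,<startcode>. Which provider WALFactory instantiates is configuration-driven; a minimal sketch, assuming the standard hbase.wal.provider key and its documented value names (not taken from this test's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "filesystem" selects the FSHLog-based provider seen in the log;
            // "asyncfs" and "multiwal" are the other commonly documented choices.
            conf.set("hbase.wal.provider", "filesystem");
            System.out.println(conf.get("hbase.wal.provider"));
        }
    }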
2024-11-07T12:15:35,135 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-07T12:15:35,135 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-07T12:15:35,135 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-07T12:15:35,149 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-07T12:15:35,149 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-07T12:15:35,149 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-07T12:15:35,151 INFO [RS:0;8dcb99358a44:36801 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-07T12:15:35,151 INFO [RS:2;8dcb99358a44:46073 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-07T12:15:35,151 INFO [RS:1;8dcb99358a44:44347 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-07T12:15:35,151 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:35,151 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:35,151 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:35,152 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-11-07T12:15:35,152 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-11-07T12:15:35,152 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-11-07T12:15:35,159 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:35,159 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:35,159 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
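The MemStoreFlusher line is internally consistent: the low-water mark is 95% of the global limit, and 880 M * 0.95 = 836 M, matching globalMemStoreLimitLowMark. The compaction throughput bounds (100 MB/s upper, 50 MB/s lower) belong to the pressure-aware throughput controller. A sketch of the keys I believe govern these values in HBase 2.x (treat the exact names and defaults as assumptions to verify against your version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Global memstore limit as a fraction of heap, and its low-water mark.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // 880 M * 0.95 = 836 M
            // PressureAwareCompactionThroughputController bounds, in bytes/second.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        }
    }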
2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8dcb99358a44:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8dcb99358a44:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8dcb99358a44:0, corePoolSize=2, maxPoolSize=2 2024-11-07T12:15:35,159 DEBUG 
[RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,159 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8dcb99358a44:0, corePoolSize=1, maxPoolSize=1 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,160 DEBUG [RS:1;8dcb99358a44:44347 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,160 DEBUG [RS:0;8dcb99358a44:36801 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,160 DEBUG [RS:2;8dcb99358a44:46073 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0, corePoolSize=3, maxPoolSize=3 2024-11-07T12:15:35,162 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,44347,1730981734185-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,46073,1730981734237-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,162 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,36801,1730981734127-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:15:35,170 WARN [8dcb99358a44:34129 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
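The entries above show each region server bringing up its named executor pools (e.g. RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1) and the ChoreService scheduling periodic chores such as CompactionChecker (period=1000 ms). As a rough illustration of those two patterns — not HBase's own ExecutorService or ChoreService classes — a minimal Java sketch built on the standard java.util.concurrent primitives:

    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionServerPoolsSketch {
        public static void main(String[] args) throws InterruptedException {
            // Bounded executor analogous to RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1):
            // a single worker thread drains a queue of region-open tasks.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                    1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            openRegionPool.execute(() -> System.out.println("open region task"));

            // Periodic chore analogous to CompactionChecker (period=1000 ms).
            ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
            chores.scheduleAtFixedRate(
                    () -> System.out.println("compaction check"),
                    1000, 1000, TimeUnit.MILLISECONDS);

            Thread.sleep(2500);      // let the chore fire a couple of times
            chores.shutdownNow();
            openRegionPool.shutdown();
        }
    }

A core size equal to the max size over an unbounded queue gives the single-worker, queue-draining behavior the corePoolSize=1, maxPoolSize=1 entries describe.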
2024-11-07T12:15:35,180 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:15:35,180 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:15:35,181 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T12:15:35,183 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,44347,1730981734185-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,183 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,36801,1730981734127-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,183 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,46073,1730981734237-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,205 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.Replication(204): 8dcb99358a44,46073,1730981734237 started 2024-11-07T12:15:35,205 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.Replication(204): 8dcb99358a44,36801,1730981734127 started 2024-11-07T12:15:35,206 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1767): Serving as 8dcb99358a44,46073,1730981734237, RpcServer on 8dcb99358a44/172.17.0.2:46073, sessionid=0x1011682c8a80003 2024-11-07T12:15:35,206 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1767): Serving as 8dcb99358a44,36801,1730981734127, RpcServer on 8dcb99358a44/172.17.0.2:36801, sessionid=0x1011682c8a80001 2024-11-07T12:15:35,206 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:15:35,206 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:15:35,206 DEBUG [RS:0;8dcb99358a44:36801 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8dcb99358a44,36801,1730981734127 2024-11-07T12:15:35,206 DEBUG [RS:2;8dcb99358a44:46073 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8dcb99358a44,46073,1730981734237 2024-11-07T12:15:35,206 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,46073,1730981734237' 2024-11-07T12:15:35,206 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,36801,1730981734127' 2024-11-07T12:15:35,206 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.Replication(204): 8dcb99358a44,44347,1730981734185 started 2024-11-07T12:15:35,206 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1767): Serving as 8dcb99358a44,44347,1730981734185, RpcServer on 8dcb99358a44/172.17.0.2:44347, sessionid=0x1011682c8a80002 2024-11-07T12:15:35,206 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:15:35,206 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:15:35,207 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T12:15:35,207 DEBUG [RS:1;8dcb99358a44:44347 {}] 
flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8dcb99358a44,44347,1730981734185 2024-11-07T12:15:35,207 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,44347,1730981734185' 2024-11-07T12:15:35,207 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T12:15:35,207 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:15:35,207 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:15:35,207 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T12:15:35,208 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:15:35,208 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:15:35,208 DEBUG [RS:1;8dcb99358a44:44347 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8dcb99358a44,44347,1730981734185 2024-11-07T12:15:35,208 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,44347,1730981734185' 2024-11-07T12:15:35,208 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:15:35,208 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:15:35,209 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T12:15:35,209 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T12:15:35,209 DEBUG [RS:2;8dcb99358a44:46073 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8dcb99358a44,46073,1730981734237 2024-11-07T12:15:35,209 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,46073,1730981734237' 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8dcb99358a44,36801,1730981734127 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8dcb99358a44,36801,1730981734127' 2024-11-07T12:15:35,209 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T12:15:35,209 DEBUG [RS:1;8dcb99358a44:44347 {}] procedure.RegionServerProcedureManagerHost(53): 
Procedure online-snapshot started 2024-11-07T12:15:35,209 INFO [RS:1;8dcb99358a44:44347 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:15:35,209 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:15:35,209 INFO [RS:1;8dcb99358a44:44347 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T12:15:35,209 DEBUG [RS:0;8dcb99358a44:36801 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:15:35,210 DEBUG [RS:2;8dcb99358a44:46073 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T12:15:35,210 INFO [RS:0;8dcb99358a44:36801 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:15:35,210 INFO [RS:2;8dcb99358a44:46073 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-07T12:15:35,210 INFO [RS:0;8dcb99358a44:36801 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:15:35,210 INFO [RS:2;8dcb99358a44:46073 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-07T12:15:35,322 INFO [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8dcb99358a44%2C44347%2C1730981734185, suffix=, logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185, archiveDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs, maxLogs=32 2024-11-07T12:15:35,322 INFO [RS:0;8dcb99358a44:36801 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8dcb99358a44%2C36801%2C1730981734127, suffix=, logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,36801,1730981734127, archiveDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs, maxLogs=32 2024-11-07T12:15:35,322 INFO [RS:2;8dcb99358a44:46073 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8dcb99358a44%2C46073%2C1730981734237, suffix=, logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,46073,1730981734237, archiveDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs, maxLogs=32 2024-11-07T12:15:35,326 INFO [RS:2;8dcb99358a44:46073 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8dcb99358a44%2C46073%2C1730981734237.1730981735326 2024-11-07T12:15:35,326 INFO [RS:1;8dcb99358a44:44347 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8dcb99358a44%2C44347%2C1730981734185.1730981735326 2024-11-07T12:15:35,326 INFO [RS:0;8dcb99358a44:36801 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8dcb99358a44%2C36801%2C1730981734127.1730981735326 2024-11-07T12:15:35,343 INFO [RS:0;8dcb99358a44:36801 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,36801,1730981734127/8dcb99358a44%2C36801%2C1730981734127.1730981735326 2024-11-07T12:15:35,343 
DEBUG [RS:0;8dcb99358a44:36801 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37763:37763),(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:44911:44911)] 2024-11-07T12:15:35,343 INFO [RS:2;8dcb99358a44:46073 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,46073,1730981734237/8dcb99358a44%2C46073%2C1730981734237.1730981735326 2024-11-07T12:15:35,343 DEBUG [RS:2;8dcb99358a44:46073 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:44911:44911),(127.0.0.1/127.0.0.1:37763:37763)] 2024-11-07T12:15:35,344 INFO [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185/8dcb99358a44%2C44347%2C1730981734185.1730981735326 2024-11-07T12:15:35,344 DEBUG [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:37763:37763),(127.0.0.1/127.0.0.1:44911:44911)] 2024-11-07T12:15:35,421 DEBUG [8dcb99358a44:34129 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-07T12:15:35,425 DEBUG [8dcb99358a44:34129 {}] balancer.BalancerClusterState(202): Hosts are {8dcb99358a44=0} racks are {/default-rack=0} 2024-11-07T12:15:35,432 DEBUG [8dcb99358a44:34129 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T12:15:35,432 DEBUG [8dcb99358a44:34129 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-07T12:15:35,432 DEBUG [8dcb99358a44:34129 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-07T12:15:35,432 INFO [8dcb99358a44:34129 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T12:15:35,432 INFO [8dcb99358a44:34129 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T12:15:35,432 INFO [8dcb99358a44:34129 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T12:15:35,432 DEBUG [8dcb99358a44:34129 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T12:15:35,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8dcb99358a44,44347,1730981734185 2024-11-07T12:15:35,439 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8dcb99358a44,44347,1730981734185, state=OPENING 2024-11-07T12:15:35,453 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T12:15:35,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:35,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:35,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:35,461 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:35,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,463 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=8dcb99358a44,44347,1730981734185}] 2024-11-07T12:15:35,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8dcb99358a44,44347,1730981734185 2024-11-07T12:15:35,647 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T12:15:35,649 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T12:15:35,658 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-07T12:15:35,659 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-07T12:15:35,662 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8dcb99358a44%2C44347%2C1730981734185.meta, suffix=.meta, logDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185, archiveDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs, maxLogs=32 2024-11-07T12:15:35,665 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8dcb99358a44%2C44347%2C1730981734185.meta.1730981735664.meta 2024-11-07T12:15:35,674 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185/8dcb99358a44%2C44347%2C1730981734185.meta.1730981735664.meta 2024-11-07T12:15:35,674 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44911:44911),(127.0.0.1/127.0.0.1:37763:37763),(127.0.0.1/127.0.0.1:35009:35009)] 2024-11-07T12:15:35,674 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:15:35,675 DEBUG 
[RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T12:15:35,733 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T12:15:35,738 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T12:15:35,742 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T12:15:35,742 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:15:35,742 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-07T12:15:35,742 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-07T12:15:35,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T12:15:35,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T12:15:35,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:15:35,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:15:35,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T12:15:35,749 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T12:15:35,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:15:35,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:15:35,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T12:15:35,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T12:15:35,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:15:35,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T12:15:35,753 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740 2024-11-07T12:15:35,756 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740 2024-11-07T12:15:35,759 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-07T12:15:35,761 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-07T12:15:35,763 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65444645, jitterRate=-0.024798795580863953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-07T12:15:35,764 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-07T12:15:35,770 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730981735640 2024-11-07T12:15:35,781 DEBUG [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T12:15:35,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8dcb99358a44,44347,1730981734185 2024-11-07T12:15:35,782 INFO [RS_OPEN_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-07T12:15:35,784 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8dcb99358a44,44347,1730981734185, state=OPEN 2024-11-07T12:15:35,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:15:35,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:15:35,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:15:35,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T12:15:35,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T12:15:35,826 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
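The ZKWatcher entries above record NodeDataChanged events on /hbase/meta-region-server as the master publishes the meta region's new location and state. A minimal sketch of reading that znode and registering a watch with the plain Apache ZooKeeper client (quorum address and session timeout taken from the log; the payload in real HBase is a protobuf-encoded ServerName, so the sketch only reports its size):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MetaLocationWatch {
        public static void main(String[] args) throws Exception {
            // Quorum address as reported in the log; requests issued before the
            // session is established are queued and sent once connected.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:52285", 90000, event -> {});

            Watcher watcher = (WatchedEvent event) ->
                    System.out.println("event " + event.getType() + " on " + event.getPath());

            // The active master publishes the meta location here.
            byte[] data = zk.getData("/hbase/meta-region-server", watcher, null);
            System.out.println("meta-region-server znode holds " + data.length + " bytes");

            zk.close();
        }
    }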
2024-11-07T12:15:35,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T12:15:35,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=8dcb99358a44,44347,1730981734185 in 361 msec 2024-11-07T12:15:35,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T12:15:35,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 826 msec 2024-11-07T12:15:35,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 967 msec 2024-11-07T12:15:35,847 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730981735847, completionTime=-1 2024-11-07T12:15:35,847 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-07T12:15:35,847 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-07T12:15:35,877 DEBUG [hconnection-0x47c5a916-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T12:15:35,879 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T12:15:35,890 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-11-07T12:15:35,890 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730981795890 2024-11-07T12:15:35,890 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730981855890 2024-11-07T12:15:35,890 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 43 msec 2024-11-07T12:15:35,921 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-07T12:15:35,928 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,928 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,928 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
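The ProcedureExecutor entries above show the parent/child chain completing bottom-up: pid=3 (OpenRegionProcedure) finishes first, letting ppid=2 (the meta ASSIGN) resume, which in turn lets pid=1 (InitMetaProcedure) finish. A loose analogy of that resume-on-child-completion pattern using plain CompletableFuture chaining — an analogy only, since the real ProcedureV2 framework also journals every step so it survives master restarts:

    import java.util.concurrent.CompletableFuture;

    public class NestedProceduresSketch {
        public static void main(String[] args) {
            // pid=3 (OpenRegionProcedure) runs as a child of pid=2 (ASSIGN),
            // which is itself a child of pid=1 (InitMetaProcedure).
            CompletableFuture<Void> openRegion =            // pid=3
                    CompletableFuture.runAsync(() -> System.out.println("open region"));
            CompletableFuture<Void> assign =                // pid=2 resumes when pid=3 finishes
                    openRegion.thenRun(() -> System.out.println("assign finished"));
            CompletableFuture<Void> initMeta =              // pid=1 resumes when pid=2 finishes
                    assign.thenRun(() -> System.out.println("init meta finished"));
            initMeta.join();
        }
    }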
2024-11-07T12:15:35,929 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8dcb99358a44:34129, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,930 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T12:15:35,936 DEBUG [master/8dcb99358a44:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-07T12:15:35,938 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-11-07T12:15:35,939 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-07T12:15:35,944 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-07T12:15:35,946 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T12:15:35,947 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:15:35,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T12:15:35,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741837_1013 (size=358) 2024-11-07T12:15:35,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741837_1013 (size=358) 2024-11-07T12:15:35,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741837_1013 (size=358) 2024-11-07T12:15:35,964 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 906f10909918a98197a5a7094ce19ce8, NAME => 'hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4 2024-11-07T12:15:35,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741838_1014 (size=42) 2024-11-07T12:15:35,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34531 is added to blk_1073741838_1014 (size=42) 2024-11-07T12:15:35,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741838_1014 (size=42) 2024-11-07T12:15:35,975 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:15:35,975 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 906f10909918a98197a5a7094ce19ce8, disabling compactions & flushes 2024-11-07T12:15:35,975 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 2024-11-07T12:15:35,975 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 2024-11-07T12:15:35,976 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. after waiting 0 ms 2024-11-07T12:15:35,976 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 2024-11-07T12:15:35,976 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 2024-11-07T12:15:35,976 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 906f10909918a98197a5a7094ce19ce8: 2024-11-07T12:15:35,977 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T12:15:35,982 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1730981735978"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730981735978"}]},"ts":"1730981735978"} 2024-11-07T12:15:36,004 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
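The MetaTableAccessor(2113) entry above shows the catalog write: a single Put against hbase:meta carrying info:regioninfo and info:state for the new namespace region. A sketch of the same Put shape built with the HBase client API — purely illustrative, since clients should never write hbase:meta directly, and the real cell values are serialized protobufs rather than the placeholder strings used here:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaPutSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table meta = conn.getTable(TableName.valueOf("hbase:meta"))) {
                // Row key is the region name from the log entry; the real values
                // are serialized RegionInfo/state protobufs, simplified here.
                Put put = new Put(Bytes.toBytes(
                        "hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8."));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"),
                        Bytes.toBytes("<RegionInfo protobuf>"));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"),
                        Bytes.toBytes("OPENING"));
                meta.put(put);
            }
        }
    }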
2024-11-07T12:15:36,006 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T12:15:36,008 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730981736006"}]},"ts":"1730981736006"} 2024-11-07T12:15:36,011 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-07T12:15:36,028 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {8dcb99358a44=0} racks are {/default-rack=0} 2024-11-07T12:15:36,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T12:15:36,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-07T12:15:36,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-07T12:15:36,029 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T12:15:36,030 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T12:15:36,030 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T12:15:36,030 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T12:15:36,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=906f10909918a98197a5a7094ce19ce8, ASSIGN}] 2024-11-07T12:15:36,035 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=906f10909918a98197a5a7094ce19ce8, ASSIGN 2024-11-07T12:15:36,037 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=906f10909918a98197a5a7094ce19ce8, ASSIGN; state=OFFLINE, location=8dcb99358a44,44347,1730981734185; forceNewPlan=false, retain=false 2024-11-07T12:15:36,190 INFO [8dcb99358a44:34129 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-07T12:15:36,191 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=906f10909918a98197a5a7094ce19ce8, regionState=OPENING, regionLocation=8dcb99358a44,44347,1730981734185 2024-11-07T12:15:36,199 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 906f10909918a98197a5a7094ce19ce8, server=8dcb99358a44,44347,1730981734185}] 2024-11-07T12:15:36,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 8dcb99358a44,44347,1730981734185 2024-11-07T12:15:36,364 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 
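Taken together, the CreateTableProcedure entries trace a fixed state machine: CREATE_TABLE_PRE_OPERATION, then WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, and POST_OPERATION. A compact sketch of that state progression, simplified in that the real procedure persists each transition so a restarted master can resume from the last completed state:

    public class CreateTableStateMachineSketch {
        // States mirrored from the CreateTableProcedure log entries.
        enum State { PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META,
                     ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION, SUCCESS }

        public static void main(String[] args) {
            State state = State.PRE_OPERATION;
            while (state != State.SUCCESS) {
                System.out.println("execute state=" + state);
                // The real executor journals the result of each step before
                // advancing; here we just move to the next state.
                state = State.values()[state.ordinal() + 1];
            }
        }
    }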
2024-11-07T12:15:36,365 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 906f10909918a98197a5a7094ce19ce8, NAME => 'hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.', STARTKEY => '', ENDKEY => ''} 2024-11-07T12:15:36,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T12:15:36,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,366 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,369 INFO [StoreOpener-906f10909918a98197a5a7094ce19ce8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,371 INFO [StoreOpener-906f10909918a98197a5a7094ce19ce8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 906f10909918a98197a5a7094ce19ce8 columnFamilyName info 2024-11-07T12:15:36,371 DEBUG [StoreOpener-906f10909918a98197a5a7094ce19ce8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T12:15:36,372 INFO [StoreOpener-906f10909918a98197a5a7094ce19ce8-1 {}] regionserver.HStore(327): Store=906f10909918a98197a5a7094ce19ce8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T12:15:36,373 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,373 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,376 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 906f10909918a98197a5a7094ce19ce8 2024-11-07T12:15:36,380 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T12:15:36,381 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 906f10909918a98197a5a7094ce19ce8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60254883, jitterRate=-0.10213227570056915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T12:15:36,382 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 906f10909918a98197a5a7094ce19ce8: 2024-11-07T12:15:36,384 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8., pid=6, masterSystemTime=1730981736356 2024-11-07T12:15:36,388 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 2024-11-07T12:15:36,388 INFO [RS_OPEN_PRIORITY_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. 
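The "Found 0 recovered edits file(s)" entries above come from the region-open path checking the region directory on HDFS for edits left behind by a crashed server. A sketch of the equivalent directory check with the Hadoop FileSystem API, with the NameNode address and region directory copied from the log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsScan {
        public static void main(String[] args) throws Exception {
            // NameNode and region directory as reported in the log.
            FileSystem fs = FileSystem.get(
                    URI.create("hdfs://localhost:44787"), new Configuration());
            Path regionDir = new Path("/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4"
                    + "/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8");
            Path editsDir = new Path(regionDir, "recovered.edits");
            if (fs.exists(editsDir)) {
                FileStatus[] edits = fs.listStatus(editsDir);
                System.out.println("Found " + edits.length + " recovered edits file(s)");
            } else {
                System.out.println("Found 0 recovered edits file(s)");
            }
            fs.close();
        }
    }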
2024-11-07T12:15:36,388 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=906f10909918a98197a5a7094ce19ce8, regionState=OPEN, openSeqNum=2, regionLocation=8dcb99358a44,44347,1730981734185 2024-11-07T12:15:36,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T12:15:36,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 906f10909918a98197a5a7094ce19ce8, server=8dcb99358a44,44347,1730981734185 in 193 msec 2024-11-07T12:15:36,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T12:15:36,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=906f10909918a98197a5a7094ce19ce8, ASSIGN in 364 msec 2024-11-07T12:15:36,401 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T12:15:36,401 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730981736401"}]},"ts":"1730981736401"} 2024-11-07T12:15:36,404 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-07T12:15:36,452 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-07T12:15:36,453 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T12:15:36,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 513 msec 2024-11-07T12:15:36,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:36,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:36,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:36,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-07T12:15:36,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T12:15:36,489 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] 
procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-07T12:15:36,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T12:15:36,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 37 msec 2024-11-07T12:15:36,533 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-07T12:15:36,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-07T12:15:36,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 31 msec 2024-11-07T12:15:36,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-07T12:15:36,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-07T12:15:36,611 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.291sec 2024-11-07T12:15:36,613 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-07T12:15:36,615 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T12:15:36,616 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T12:15:36,617 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-07T12:15:36,617 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T12:15:36,618 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T12:15:36,618 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T12:15:36,621 DEBUG [master/8dcb99358a44:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-07T12:15:36,622 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
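The CreateNamespaceProcedure entries show the master bootstrapping the built-in 'default' and 'hbase' namespaces and mirroring them under /hbase/namespace in ZooKeeper. A sketch of creating a namespace through the Admin API — the namespace name 'demo_ns' is hypothetical, since the two logged namespaces already exist on any running cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // 'demo_ns' is a hypothetical name; 'default' and 'hbase' are
                // created by the master itself, as the log above records.
                admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
            }
        }
    }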
2024-11-07T12:15:36,622 INFO [master/8dcb99358a44:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8dcb99358a44,34129,1730981733968-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-07T12:15:36,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ac21c3c to 127.0.0.1:52285 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9da6a11
2024-11-07T12:15:36,711 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry
2024-11-07T12:15:36,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79af7f81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-07T12:15:36,724 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-11-07T12:15:36,724 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-11-07T12:15:36,733 DEBUG [hconnection-0x76426e84-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-07T12:15:36,753 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-07T12:15:36,767 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=8dcb99358a44,34129,1730981733968
2024-11-07T12:15:36,775 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-07T12:15:36,778 INFO [RS-EventLoopGroup-6-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46660, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-07T12:15:36,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-07T12:15:36,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestHBaseWalOnEC
2024-11-07T12:15:36,790 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION
2024-11-07T12:15:36,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 9
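
[Editor's note] The client connection above still goes through the deprecated ZKConnectionRegistry, as the WARN entry points out. A minimal sketch of how a client might opt into the RPC-based registry referenced in that warning; the configuration keys and the bootstrap address are assumptions taken from the book section linked in the log, not verified against this build:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: switch from the deprecated ZKConnectionRegistry to the
    // RPC connection registry (see the WARN above and the linked book section).
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    // Hypothetical bootstrap address; in this log the master listens on port 34129.
    conf.set("hbase.client.bootstrap.servers", "8dcb99358a44:34129");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}
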
2024-11-07T12:15:36,790 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:36,792 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-07T12:15:36,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-11-07T12:15:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741839_1015 (size=392)
2024-11-07T12:15:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741839_1015 (size=392)
2024-11-07T12:15:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741839_1015 (size=392)
2024-11-07T12:15:36,808 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bab6c331e1c41d0bf90429b06984ded4, NAME => 'TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4
2024-11-07T12:15:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741840_1016 (size=51)
2024-11-07T12:15:36,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741840_1016 (size=51)
2024-11-07T12:15:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741840_1016 (size=51)
2024-11-07T12:15:36,821 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T12:15:36,822 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1681): Closing bab6c331e1c41d0bf90429b06984ded4, disabling compactions & flushes
2024-11-07T12:15:36,822 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:36,822 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
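
[Editor's note] The addStoredBlock entries above show store-file blocks landing on three replicas, while the test name TestHBaseWalOnEC suggests the WAL directory sits on an erasure-coded HDFS path. A hedged sketch of how a test might apply an EC policy with the HDFS client API; the NameNode URI matches the hdfs://localhost:44787 paths in this log, but the WAL path and policy name are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalOnEcSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = new Path("hdfs://localhost:44787/").getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // "RS-3-2-1024k" is a built-in Hadoop 3 policy; the directory is assumed.
    dfs.enableErasureCodingPolicy("RS-3-2-1024k");
    dfs.setErasureCodingPolicy(
        new Path("/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs"),
        "RS-3-2-1024k");
    dfs.close();
  }
}
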
2024-11-07T12:15:36,822 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4. after waiting 0 ms
2024-11-07T12:15:36,822 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:36,822 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:36,822 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1635): Region close journal for bab6c331e1c41d0bf90429b06984ded4:
2024-11-07T12:15:36,824 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META
2024-11-07T12:15:36,824 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1730981736824"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730981736824"}]},"ts":"1730981736824"}
2024-11-07T12:15:36,827 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-11-07T12:15:36,829 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-07T12:15:36,829 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730981736829"}]},"ts":"1730981736829"}
2024-11-07T12:15:36,832 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta
2024-11-07T12:15:36,853 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {8dcb99358a44=0} racks are {/default-rack=0}
2024-11-07T12:15:36,854 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-07T12:15:36,854 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0
2024-11-07T12:15:36,854 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0
2024-11-07T12:15:36,854 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-07T12:15:36,854 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-07T12:15:36,854 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0
2024-11-07T12:15:36,854 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1
2024-11-07T12:15:36,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bab6c331e1c41d0bf90429b06984ded4, ASSIGN}]
2024-11-07T12:15:36,856 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bab6c331e1c41d0bf90429b06984ded4, ASSIGN
2024-11-07T12:15:36,858 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bab6c331e1c41d0bf90429b06984ded4, ASSIGN; state=OFFLINE, location=8dcb99358a44,44347,1730981734185; forceNewPlan=false, retain=false
2024-11-07T12:15:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-11-07T12:15:37,008 INFO [8dcb99358a44:34129 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-07T12:15:37,009 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=bab6c331e1c41d0bf90429b06984ded4, regionState=OPENING, regionLocation=8dcb99358a44,44347,1730981734185
2024-11-07T12:15:37,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure bab6c331e1c41d0bf90429b06984ded4, server=8dcb99358a44,44347,1730981734185}]
2024-11-07T12:15:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-11-07T12:15:37,173 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 8dcb99358a44,44347,1730981734185
2024-11-07T12:15:37,182 INFO [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,183 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => bab6c331e1c41d0bf90429b06984ded4, NAME => 'TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.', STARTKEY => '', ENDKEY => ''}
2024-11-07T12:15:37,183 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,183 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T12:15:37,184 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,184 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,187 INFO [StoreOpener-bab6c331e1c41d0bf90429b06984ded4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,189 INFO [StoreOpener-bab6c331e1c41d0bf90429b06984ded4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bab6c331e1c41d0bf90429b06984ded4 columnFamilyName cf
2024-11-07T12:15:37,190 DEBUG [StoreOpener-bab6c331e1c41d0bf90429b06984ded4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T12:15:37,191 INFO [StoreOpener-bab6c331e1c41d0bf90429b06984ded4-1 {}] regionserver.HStore(327): Store=bab6c331e1c41d0bf90429b06984ded4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T12:15:37,192 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,193 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,198 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,203 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T12:15:37,204 INFO [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened bab6c331e1c41d0bf90429b06984ded4; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73551406, jitterRate=0.09600135684013367}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-07T12:15:37,205 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for bab6c331e1c41d0bf90429b06984ded4:
2024-11-07T12:15:37,207 INFO [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4., pid=11, masterSystemTime=1730981737173
2024-11-07T12:15:37,211 DEBUG [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,211 INFO [RS_OPEN_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,212 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=bab6c331e1c41d0bf90429b06984ded4, regionState=OPEN, openSeqNum=2, regionLocation=8dcb99358a44,44347,1730981734185
2024-11-07T12:15:37,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-11-07T12:15:37,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure bab6c331e1c41d0bf90429b06984ded4, server=8dcb99358a44,44347,1730981734185 in 199 msec
2024-11-07T12:15:37,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-11-07T12:15:37,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bab6c331e1c41d0bf90429b06984ded4, ASSIGN in 365 msec
2024-11-07T12:15:37,224 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-07T12:15:37,225 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730981737225"}]},"ts":"1730981737225"}
2024-11-07T12:15:37,228 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta
2024-11-07T12:15:37,263 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION
2024-11-07T12:15:37,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestHBaseWalOnEC in 476 msec
2024-11-07T12:15:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-11-07T12:15:37,408 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestHBaseWalOnEC, procId: 9 completed
2024-11-07T12:15:37,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms
2024-11-07T12:15:37,411 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T12:15:37,418 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states.
2024-11-07T12:15:37,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T12:15:37,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table TestHBaseWalOnEC assigned.
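
[Editor's note] The schema the HMaster logged above (one family 'cf', VERSIONS=1, no compression, 64 KB blocks, REGION_REPLICATION=1) corresponds to a plain Admin call. A minimal sketch of creating an equivalent table with the HBase 2.x client API; this is an assumption-laden reconstruction, not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)        // VERSIONS => '1'
          .setBlocksize(65536)      // BLOCKSIZE => 64KB
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setRegionReplication(1)  // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      // Drives the CreateTableProcedure seen above (pid=9) and waits for it.
      admin.createTable(table);
    }
  }
}
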
2024-11-07T12:15:37,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC
2024-11-07T12:15:37,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC
2024-11-07T12:15:37,439 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE
2024-11-07T12:15:37,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-07T12:15:37,441 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-07T12:15:37,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-07T12:15:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-07T12:15:37,604 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 8dcb99358a44,44347,1730981734185
2024-11-07T12:15:37,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44347 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-07T12:15:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,606 INFO [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing bab6c331e1c41d0bf90429b06984ded4 1/1 column families, dataSize=32 B heapSize=360 B
2024-11-07T12:15:37,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/.tmp/cf/85bc51d3727e40ceba2e94dab73163fd is 36, key is row/cf:cq/1730981737423/Put/seqid=0
2024-11-07T12:15:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741841_1017 (size=4787)
2024-11-07T12:15:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741841_1017 (size=4787)
2024-11-07T12:15:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741841_1017 (size=4787)
2024-11-07T12:15:37,662 INFO [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/.tmp/cf/85bc51d3727e40ceba2e94dab73163fd
2024-11-07T12:15:37,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/.tmp/cf/85bc51d3727e40ceba2e94dab73163fd as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/cf/85bc51d3727e40ceba2e94dab73163fd
2024-11-07T12:15:37,709 INFO [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/cf/85bc51d3727e40ceba2e94dab73163fd, entries=1, sequenceid=5, filesize=4.7 K
2024-11-07T12:15:37,711 INFO [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for bab6c331e1c41d0bf90429b06984ded4 in 105ms, sequenceid=5, compaction requested=false
2024-11-07T12:15:37,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC'
2024-11-07T12:15:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for bab6c331e1c41d0bf90429b06984ded4:
2024-11-07T12:15:37,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
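
[Editor's note] The flush above persists a single 32-byte cell (key row/cf:cq) from the memstore into the HFile 85bc51d3727e40ceba2e94dab73163fd. A hedged sketch of the client-side put and flush that would produce exactly this; the value bytes are an assumption, since only the cell's key is visible in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Matches the flushed cell row/cf:cq seen above; the value is assumed.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure (pid=12 above) and waits for completion.
      admin.flush(name);
    }
  }
}
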
2024-11-07T12:15:37,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8dcb99358a44:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-11-07T12:15:37,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-11-07T12:15:37,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-11-07T12:15:37,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 277 msec
2024-11-07T12:15:37,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC in 289 msec
2024-11-07T12:15:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34129 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-07T12:15:37,747 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC, procId: 12 completed
2024-11-07T12:15:37,760 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-11-07T12:15:37,761 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-07T12:15:37,761 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ac21c3c to 127.0.0.1:52285
2024-11-07T12:15:37,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,762 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-07T12:15:37,762 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=477457214, stopped=false
2024-11-07T12:15:37,762 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=8dcb99358a44,34129,1730981733968
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-07T12:15:37,803 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:37,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,36801,1730981734127' *****
2024-11-07T12:15:37,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,44347,1730981734185' *****
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,46073,1730981734237' *****
2024-11-07T12:15:37,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:37,804 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-11-07T12:15:37,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-07T12:15:37,804 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-07T12:15:37,805 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-07T12:15:37,805 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-11-07T12:15:37,805 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-11-07T12:15:37,805 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-07T12:15:37,805 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-11-07T12:15:37,806 INFO [RS:1;8dcb99358a44:44347 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-07T12:15:37,806 INFO [RS:1;8dcb99358a44:44347 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-07T12:15:37,806 INFO [RS:0;8dcb99358a44:36801 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-07T12:15:37,806 INFO [RS:2;8dcb99358a44:46073 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-07T12:15:37,806 INFO [RS:0;8dcb99358a44:36801 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-07T12:15:37,806 INFO [RS:2;8dcb99358a44:46073 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-07T12:15:37,806 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(3579): Received CLOSE for bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,806 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1224): stopping server 8dcb99358a44,36801,1730981734127
2024-11-07T12:15:37,806 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1224): stopping server 8dcb99358a44,46073,1730981734237
2024-11-07T12:15:37,806 DEBUG [RS:0;8dcb99358a44:36801 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,806 DEBUG [RS:2;8dcb99358a44:46073 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,806 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1250): stopping server 8dcb99358a44,46073,1730981734237; all regions closed.
2024-11-07T12:15:37,806 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1250): stopping server 8dcb99358a44,36801,1730981734127; all regions closed.
2024-11-07T12:15:37,807 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(3579): Received CLOSE for 906f10909918a98197a5a7094ce19ce8
2024-11-07T12:15:37,807 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1224): stopping server 8dcb99358a44,44347,1730981734185
2024-11-07T12:15:37,807 DEBUG [RS:1;8dcb99358a44:44347 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,807 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-07T12:15:37,807 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-07T12:15:37,807 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-07T12:15:37,807 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,36801,1730981734127
2024-11-07T12:15:37,808 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,46073,1730981734237
2024-11-07T12:15:37,808 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-11-07T12:15:37,807 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing bab6c331e1c41d0bf90429b06984ded4, disabling compactions & flushes
2024-11-07T12:15:37,808 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,808 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,809 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4. after waiting 0 ms
2024-11-07T12:15:37,809 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-11-07T12:15:37,809 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,809 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1603): Online Regions={bab6c331e1c41d0bf90429b06984ded4=TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4., 1588230740=hbase:meta,,1.1588230740, 906f10909918a98197a5a7094ce19ce8=hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.}
2024-11-07T12:15:37,810 DEBUG [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 906f10909918a98197a5a7094ce19ce8, bab6c331e1c41d0bf90429b06984ded4
2024-11-07T12:15:37,810 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-11-07T12:15:37,810 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-11-07T12:15:37,810 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-11-07T12:15:37,810 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-07T12:15:37,810 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-11-07T12:15:37,811 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.51 KB heapSize=5.02 KB
2024-11-07T12:15:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741834_1010 (size=93)
2024-11-07T12:15:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741833_1009 (size=93)
2024-11-07T12:15:37,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741833_1009 (size=93)
2024-11-07T12:15:37,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741834_1010 (size=93)
2024-11-07T12:15:37,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741834_1010 (size=93)
2024-11-07T12:15:37,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741833_1009 (size=93)
2024-11-07T12:15:37,818 DEBUG [RS:2;8dcb99358a44:46073 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 8dcb99358a44%2C46073%2C1730981734237:(num 1730981735326)
2024-11-07T12:15:37,819 DEBUG [RS:2;8dcb99358a44:46073 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,819 DEBUG [RS:0;8dcb99358a44:36801 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 8dcb99358a44%2C36801%2C1730981734127:(num 1730981735326)
2024-11-07T12:15:37,819 DEBUG [RS:0;8dcb99358a44:36801 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:37,819 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/default/TestHBaseWalOnEC/bab6c331e1c41d0bf90429b06984ded4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] hbase.ChoreService(370): Chore service for: regionserver/8dcb99358a44:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] hbase.ChoreService(370): Chore service for: regionserver/8dcb99358a44:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-07T12:15:37,819 INFO [regionserver/8dcb99358a44:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-07T12:15:37,819 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-07T12:15:37,819 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-07T12:15:37,820 INFO [regionserver/8dcb99358a44:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-07T12:15:37,820 INFO [RS:0;8dcb99358a44:36801 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36801
2024-11-07T12:15:37,820 INFO [RS:2;8dcb99358a44:46073 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46073
2024-11-07T12:15:37,822 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,822 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for bab6c331e1c41d0bf90429b06984ded4:
2024-11-07T12:15:37,822 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4.
2024-11-07T12:15:37,822 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 906f10909918a98197a5a7094ce19ce8, disabling compactions & flushes
2024-11-07T12:15:37,822 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.
2024-11-07T12:15:37,823 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.
2024-11-07T12:15:37,823 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8. after waiting 0 ms
2024-11-07T12:15:37,823 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.
2024-11-07T12:15:37,823 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 906f10909918a98197a5a7094ce19ce8 1/1 column families, dataSize=78 B heapSize=488 B
2024-11-07T12:15:37,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-07T12:15:37,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8dcb99358a44,46073,1730981734237
2024-11-07T12:15:37,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8dcb99358a44,36801,1730981734127
2024-11-07T12:15:37,829 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8dcb99358a44,46073,1730981734237]
2024-11-07T12:15:37,829 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 8dcb99358a44,46073,1730981734237; numProcessing=1
2024-11-07T12:15:37,842 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/info/3c2b719e2c8f461985c8b8ec03bc3031 is 153, key is TestHBaseWalOnEC,,1730981736783.bab6c331e1c41d0bf90429b06984ded4./info:regioninfo/1730981737212/Put/seqid=0
2024-11-07T12:15:37,843 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/.tmp/info/9f4212e1021e4c1f9e499294fcc519f8 is 45, key is default/info:d/1730981736499/Put/seqid=0
2024-11-07T12:15:37,844 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/8dcb99358a44,46073,1730981734237 already deleted, retry=false
2024-11-07T12:15:37,844 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 8dcb99358a44,46073,1730981734237 expired; onlineServers=2
2024-11-07T12:15:37,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8dcb99358a44,36801,1730981734127]
2024-11-07T12:15:37,844 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 8dcb99358a44,36801,1730981734127; numProcessing=2
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741843_1019 (size=5037)
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741843_1019 (size=5037)
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741842_1018 (size=7835)
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741842_1018 (size=7835)
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741843_1019 (size=5037)
2024-11-07T12:15:37,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741842_1018 (size=7835)
2024-11-07T12:15:37,851 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/.tmp/info/9f4212e1021e4c1f9e499294fcc519f8
2024-11-07T12:15:37,851 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.32 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/info/3c2b719e2c8f461985c8b8ec03bc3031
2024-11-07T12:15:37,852 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/8dcb99358a44,36801,1730981734127 already deleted, retry=false
2024-11-07T12:15:37,853 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 8dcb99358a44,36801,1730981734127 expired; onlineServers=1
2024-11-07T12:15:37,861 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/.tmp/info/9f4212e1021e4c1f9e499294fcc519f8 as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/info/9f4212e1021e4c1f9e499294fcc519f8
2024-11-07T12:15:37,865 INFO [regionserver/8dcb99358a44:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:37,867 INFO [regionserver/8dcb99358a44:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:37,867 INFO [regionserver/8dcb99358a44:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:37,872 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/info/9f4212e1021e4c1f9e499294fcc519f8, entries=2, sequenceid=6, filesize=4.9 K
2024-11-07T12:15:37,874 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 906f10909918a98197a5a7094ce19ce8 in 51ms, sequenceid=6, compaction requested=false
2024-11-07T12:15:37,874 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-11-07T12:15:37,884 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/namespace/906f10909918a98197a5a7094ce19ce8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-11-07T12:15:37,884 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/table/765f70d0e8a74e35846d7637c62baa39 is 52, key is TestHBaseWalOnEC/table:state/1730981737225/Put/seqid=0
2024-11-07T12:15:37,885 INFO [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.
2024-11-07T12:15:37,885 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 906f10909918a98197a5a7094ce19ce8:
2024-11-07T12:15:37,885 DEBUG [RS_CLOSE_REGION-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1730981735939.906f10909918a98197a5a7094ce19ce8.
2024-11-07T12:15:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741844_1020 (size=5347)
2024-11-07T12:15:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741844_1020 (size=5347)
2024-11-07T12:15:37,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741844_1020 (size=5347)
2024-11-07T12:15:37,897 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/table/765f70d0e8a74e35846d7637c62baa39
2024-11-07T12:15:37,907 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/info/3c2b719e2c8f461985c8b8ec03bc3031 as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/info/3c2b719e2c8f461985c8b8ec03bc3031
2024-11-07T12:15:37,916 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/info/3c2b719e2c8f461985c8b8ec03bc3031, entries=20, sequenceid=14, filesize=7.7 K
2024-11-07T12:15:37,917 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/.tmp/table/765f70d0e8a74e35846d7637c62baa39 as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/table/765f70d0e8a74e35846d7637c62baa39
2024-11-07T12:15:37,926 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/table/765f70d0e8a74e35846d7637c62baa39, entries=4, sequenceid=14, filesize=5.2 K
2024-11-07T12:15:37,928 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.51 KB/2567, heapSize ~4.74 KB/4856, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=14, compaction requested=false
2024-11-07T12:15:37,928 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-07T12:15:37,933 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-11-07T12:15:37,933 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-07T12:15:37,934 INFO [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-07T12:15:37,934 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-07T12:15:37,934 DEBUG [RS_CLOSE_META-regionserver/8dcb99358a44:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-07T12:15:37,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:37,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:37,936 INFO [RS:2;8dcb99358a44:46073 {}] regionserver.HRegionServer(1307): Exiting; stopping=8dcb99358a44,46073,1730981734237; zookeeper connection closed.
2024-11-07T12:15:37,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36801-0x1011682c8a80001, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:37,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46073-0x1011682c8a80003, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:37,936 INFO [RS:0;8dcb99358a44:36801 {}] regionserver.HRegionServer(1307): Exiting; stopping=8dcb99358a44,36801,1730981734127; zookeeper connection closed.
2024-11-07T12:15:37,937 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7ea1d661 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7ea1d661
2024-11-07T12:15:37,937 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@636d9eae {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@636d9eae
2024-11-07T12:15:38,010 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1250): stopping server 8dcb99358a44,44347,1730981734185; all regions closed.
2024-11-07T12:15:38,011 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185
2024-11-07T12:15:38,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741836_1012 (size=4015)
2024-11-07T12:15:38,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741836_1012 (size=4015)
2024-11-07T12:15:38,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741836_1012 (size=4015)
2024-11-07T12:15:38,022 DEBUG [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs
2024-11-07T12:15:38,022 INFO [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 8dcb99358a44%2C44347%2C1730981734185.meta:.meta(num 1730981735664)
2024-11-07T12:15:38,023 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/WALs/8dcb99358a44,44347,1730981734185
2024-11-07T12:15:38,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741835_1011 (size=2619)
2024-11-07T12:15:38,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741835_1011 (size=2619)
2024-11-07T12:15:38,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741835_1011 (size=2619)
2024-11-07T12:15:38,028 DEBUG [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/oldWALs
2024-11-07T12:15:38,028 INFO [RS:1;8dcb99358a44:44347 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 8dcb99358a44%2C44347%2C1730981734185:(num 1730981735326)
2024-11-07T12:15:38,028 DEBUG [RS:1;8dcb99358a44:44347 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:38,028 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.LeaseManager(133): Closed leases
2024-11-07T12:15:38,028 INFO [RS:1;8dcb99358a44:44347 {}] hbase.ChoreService(370): Chore service for: regionserver/8dcb99358a44:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-11-07T12:15:38,028 INFO [regionserver/8dcb99358a44:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-07T12:15:38,029 INFO [RS:1;8dcb99358a44:44347 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44347
2024-11-07T12:15:38,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-07T12:15:38,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8dcb99358a44,44347,1730981734185
2024-11-07T12:15:38,044 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8dcb99358a44,44347,1730981734185]
2024-11-07T12:15:38,044 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 8dcb99358a44,44347,1730981734185; numProcessing=3
2024-11-07T12:15:38,073 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/8dcb99358a44,44347,1730981734185 already deleted, retry=false
2024-11-07T12:15:38,073 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 8dcb99358a44,44347,1730981734185 expired; onlineServers=0
2024-11-07T12:15:38,073 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '8dcb99358a44,34129,1730981733968' *****
2024-11-07T12:15:38,073 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-07T12:15:38,074 DEBUG [M:0;8dcb99358a44:34129 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49c04c6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8dcb99358a44/172.17.0.2:0
2024-11-07T12:15:38,074 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegionServer(1224): stopping server 8dcb99358a44,34129,1730981733968
2024-11-07T12:15:38,074 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegionServer(1250): stopping server 8dcb99358a44,34129,1730981733968; all regions closed.
2024-11-07T12:15:38,074 DEBUG [M:0;8dcb99358a44:34129 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-07T12:15:38,074 DEBUG [M:0;8dcb99358a44:34129 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-07T12:15:38,074 DEBUG [M:0;8dcb99358a44:34129 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-07T12:15:38,074 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-07T12:15:38,074 DEBUG [master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.small.0-1730981734946 {}] cleaner.HFileCleaner(306): Exit Thread[master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.small.0-1730981734946,5,FailOnTimeoutGroup]
2024-11-07T12:15:38,074 DEBUG [master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.large.0-1730981734945 {}] cleaner.HFileCleaner(306): Exit Thread[master/8dcb99358a44:0:becomeActiveMaster-HFileCleaner.large.0-1730981734945,5,FailOnTimeoutGroup]
2024-11-07T12:15:38,075 INFO [M:0;8dcb99358a44:34129 {}] hbase.ChoreService(370): Chore service for: master/8dcb99358a44:0 had [] on shutdown
2024-11-07T12:15:38,075 DEBUG [M:0;8dcb99358a44:34129 {}] master.HMaster(1733): Stopping service threads
2024-11-07T12:15:38,075 INFO [M:0;8dcb99358a44:34129 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-07T12:15:38,076 INFO [M:0;8dcb99358a44:34129 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-07T12:15:38,076 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-07T12:15:38,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T12:15:38,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T12:15:38,086 DEBUG [M:0;8dcb99358a44:34129 {}] zookeeper.ZKUtil(347): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-07T12:15:38,086 WARN [M:0;8dcb99358a44:34129 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-07T12:15:38,086 INFO [M:0;8dcb99358a44:34129 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-07T12:15:38,087 INFO [M:0;8dcb99358a44:34129 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-07T12:15:38,087 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-07T12:15:38,087 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-07T12:15:38,087 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:15:38,087 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:15:38,088 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-07T12:15:38,088 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:15:38,088 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.79 KB heapSize=54.26 KB
2024-11-07T12:15:38,111 DEBUG [M:0;8dcb99358a44:34129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35900424f0f4458bb2c81258b0705052 is 82, key is hbase:meta,,1/info:regioninfo/1730981735781/Put/seqid=0
2024-11-07T12:15:38,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741845_1021 (size=5672)
2024-11-07T12:15:38,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741845_1021 (size=5672)
2024-11-07T12:15:38,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741845_1021 (size=5672)
2024-11-07T12:15:38,119 INFO [M:0;8dcb99358a44:34129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35900424f0f4458bb2c81258b0705052
2024-11-07T12:15:38,141 DEBUG [M:0;8dcb99358a44:34129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16f4ed345da247a8b4d65ae72092e279 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1730981737266/Put/seqid=0
2024-11-07T12:15:38,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:38,144 INFO [RS:1;8dcb99358a44:44347 {}] regionserver.HRegionServer(1307): Exiting; stopping=8dcb99358a44,44347,1730981734185; zookeeper connection closed.
2024-11-07T12:15:38,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44347-0x1011682c8a80002, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:38,145 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f73241d {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f73241d
2024-11-07T12:15:38,145 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-07T12:15:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741846_1022 (size=7797)
2024-11-07T12:15:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741846_1022 (size=7797)
2024-11-07T12:15:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741846_1022 (size=7797)
2024-11-07T12:15:38,149 INFO [M:0;8dcb99358a44:34129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.11 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16f4ed345da247a8b4d65ae72092e279
2024-11-07T12:15:38,170 DEBUG [M:0;8dcb99358a44:34129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3345ee0d3ef64dbeb100a5a821c0922a is 69, key is 8dcb99358a44,36801,1730981734127/rs:state/1730981735060/Put/seqid=0
2024-11-07T12:15:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741847_1023 (size=5294)
2024-11-07T12:15:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741847_1023 (size=5294)
2024-11-07T12:15:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741847_1023 (size=5294)
2024-11-07T12:15:38,178 INFO [M:0;8dcb99358a44:34129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3345ee0d3ef64dbeb100a5a821c0922a
2024-11-07T12:15:38,186 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35900424f0f4458bb2c81258b0705052 as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35900424f0f4458bb2c81258b0705052
2024-11-07T12:15:38,193 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35900424f0f4458bb2c81258b0705052, entries=8, sequenceid=111, filesize=5.5 K
2024-11-07T12:15:38,194 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16f4ed345da247a8b4d65ae72092e279 as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16f4ed345da247a8b4d65ae72092e279
2024-11-07T12:15:38,201 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16f4ed345da247a8b4d65ae72092e279, entries=13, sequenceid=111, filesize=7.6 K
2024-11-07T12:15:38,203 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3345ee0d3ef64dbeb100a5a821c0922a as hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3345ee0d3ef64dbeb100a5a821c0922a
2024-11-07T12:15:38,210 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44787/user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3345ee0d3ef64dbeb100a5a821c0922a, entries=3, sequenceid=111, filesize=5.2 K
2024-11-07T12:15:38,211 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(3040): Finished flush of dataSize ~43.79 KB/44839, heapSize ~53.96 KB/55256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=111, compaction requested=false
2024-11-07T12:15:38,213 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T12:15:38,213 DEBUG [M:0;8dcb99358a44:34129 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-07T12:15:38,213 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6aacdfbf-cc82-291b-827f-199fdd926bc4/MasterData/WALs/8dcb99358a44,34129,1730981733968
2024-11-07T12:15:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741830_1006 (size=52554)
2024-11-07T12:15:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37239 is added to blk_1073741830_1006 (size=52554)
2024-11-07T12:15:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34265 is added to blk_1073741830_1006 (size=52554)
2024-11-07T12:15:38,216 INFO [M:0;8dcb99358a44:34129 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-07T12:15:38,216 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-07T12:15:38,217 INFO [M:0;8dcb99358a44:34129 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34129
2024-11-07T12:15:38,225 DEBUG [M:0;8dcb99358a44:34129 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/8dcb99358a44,34129,1730981733968 already deleted, retry=false
2024-11-07T12:15:38,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:38,336 INFO [M:0;8dcb99358a44:34129 {}] regionserver.HRegionServer(1307): Exiting; stopping=8dcb99358a44,34129,1730981733968; zookeeper connection closed.
2024-11-07T12:15:38,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34129-0x1011682c8a80000, quorum=127.0.0.1:52285, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T12:15:38,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ed08ade{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:38,340 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54f72b24{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:38,340 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:38,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1de7ceda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:38,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43d8d8de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:38,341 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:38,341 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:38,341 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:38,341 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1589988083-172.17.0.2-1730981731858 (Datanode Uuid bf63df42-278d-49b9-9fd7-4548a8045599) service to localhost/127.0.0.1:44787
2024-11-07T12:15:38,342 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data5/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,342 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data6/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,342 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:38,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15aff75{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:38,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9338a9d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:38,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:38,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52790ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:38,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43876932{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:38,346 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:38,346 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:38,346 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:38,346 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1589988083-172.17.0.2-1730981731858 (Datanode Uuid 1fa3454c-4d8f-4868-9178-23bbe0f4f656) service to localhost/127.0.0.1:44787
2024-11-07T12:15:38,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data3/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data4/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:38,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ba3a5d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T12:15:38,350 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fa17b83{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:38,350 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:38,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@753a7704{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:38,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3806bae2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:38,351 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T12:15:38,351 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T12:15:38,351 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T12:15:38,351 WARN [BP-1589988083-172.17.0.2-1730981731858 heartbeating to localhost/127.0.0.1:44787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1589988083-172.17.0.2-1730981731858 (Datanode Uuid b0f7ecdf-16dd-427d-9f52-89b89d5db43b) service to localhost/127.0.0.1:44787
2024-11-07T12:15:38,352 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data1/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,352 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/cluster_671de6c1-df2a-7f63-627e-b06ba20bfa15/dfs/data/data2/current/BP-1589988083-172.17.0.2-1730981731858 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T12:15:38,352 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T12:15:38,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a25ecf1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T12:15:38,358 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ea4011e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T12:15:38,358 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T12:15:38,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cd6d6cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T12:15:38,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17455d4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/94a1b264-07f8-bb43-36bf-3de40a7e9255/hadoop.log.dir/,STOPPED}
2024-11-07T12:15:38,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-07T12:15:38,394 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-11-07T12:15:38,401 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=142 (was 105) - Thread LEAK? -, OpenFileDescriptor=525 (was 373) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=275 (was 290), ProcessCount=11 (was 11), AvailableMemoryMB=3935 (was 4146)