2024-12-02 03:42:57,086 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-02 03:42:57,100 main DEBUG Took 0.011810 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 03:42:57,101 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 03:42:57,101 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 03:42:57,102 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 03:42:57,103 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,114 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 03:42:57,134 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,136 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,136 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,137 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,137 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,138 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,139 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,139 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,140 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,140 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,141 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,142 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,142 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,143 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 03:42:57,143 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,143 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,144 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,144 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,145 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,145 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,145 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,146 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,146 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,147 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 03:42:57,147 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,148 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 03:42:57,149 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 03:42:57,151 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 03:42:57,152 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 03:42:57,153 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-02 03:42:57,154 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 03:42:57,155 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 03:42:57,164 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 03:42:57,166 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 03:42:57,168 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 03:42:57,169 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 03:42:57,169 main DEBUG createAppenders(={Console}) 2024-12-02 03:42:57,170 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-02 03:42:57,170 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-02 03:42:57,171 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-02 03:42:57,172 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 03:42:57,172 main DEBUG OutputStream closed 2024-12-02 03:42:57,172 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 03:42:57,173 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 03:42:57,173 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-02 03:42:57,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 03:42:57,255 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 03:42:57,257 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 03:42:57,258 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 03:42:57,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 03:42:57,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 03:42:57,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 03:42:57,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 03:42:57,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 03:42:57,261 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 03:42:57,261 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 03:42:57,262 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 03:42:57,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 03:42:57,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 03:42:57,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 03:42:57,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 03:42:57,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 03:42:57,264 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 03:42:57,266 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 03:42:57,266 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-02 03:42:57,266 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 03:42:57,267 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-02T03:42:57,281 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-02 03:42:57,284 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 03:42:57,285 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-02T03:42:57,522 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb 2024-12-02T03:42:57,549 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6, deleteOnExit=true 2024-12-02T03:42:57,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/test.cache.data in system properties and HBase conf 2024-12-02T03:42:57,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T03:42:57,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir in system properties and HBase conf 2024-12-02T03:42:57,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T03:42:57,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T03:42:57,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T03:42:57,648 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-02T03:42:57,741 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T03:42:57,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T03:42:57,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T03:42:57,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T03:42:57,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T03:42:57,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T03:42:57,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T03:42:57,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T03:42:57,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T03:42:57,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T03:42:57,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/nfs.dump.dir in system properties and HBase conf 2024-12-02T03:42:57,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/java.io.tmpdir in system properties and HBase conf 2024-12-02T03:42:57,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T03:42:57,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T03:42:57,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T03:42:58,730 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T03:42:58,801 INFO [Time-limited test {}] log.Log(170): Logging initialized @2346ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T03:42:58,875 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:42:58,942 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:42:58,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:42:58,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:42:58,970 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T03:42:58,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:42:58,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:42:58,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:42:59,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/java.io.tmpdir/jetty-localhost-40545-hadoop-hdfs-3_4_1-tests_jar-_-any-940906307623696285/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T03:42:59,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:40545} 2024-12-02T03:42:59,174 INFO [Time-limited test {}] server.Server(415): Started @2720ms 2024-12-02T03:42:59,642 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:42:59,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:42:59,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:42:59,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:42:59,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T03:42:59,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:42:59,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:42:59,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/java.io.tmpdir/jetty-localhost-32819-hadoop-hdfs-3_4_1-tests_jar-_-any-16389246178036972224/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:42:59,759 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:32819} 2024-12-02T03:42:59,759 INFO [Time-limited test {}] server.Server(415): Started @3304ms 2024-12-02T03:42:59,809 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T03:42:59,907 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:42:59,913 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:42:59,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:42:59,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:42:59,915 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T03:42:59,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:42:59,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:00,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/java.io.tmpdir/jetty-localhost-34557-hadoop-hdfs-3_4_1-tests_jar-_-any-15305774877243990709/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:00,012 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:34557} 2024-12-02T03:43:00,012 INFO [Time-limited test {}] server.Server(415): Started @3558ms 2024-12-02T03:43:00,014 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T03:43:00,045 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:00,049 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:43:00,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:43:00,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:43:00,051 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T03:43:00,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:43:00,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:00,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/java.io.tmpdir/jetty-localhost-35851-hadoop-hdfs-3_4_1-tests_jar-_-any-14195337095768801441/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:00,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:35851} 2024-12-02T03:43:00,146 INFO [Time-limited test {}] server.Server(415): Started @3692ms 2024-12-02T03:43:00,148 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T03:43:00,922 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data4/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:00,922 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data3/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:00,922 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data2/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:00,922 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data1/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:00,961 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T03:43:00,961 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T03:43:01,004 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data5/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:01,004 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data6/current/BP-1468540406-172.17.0.2-1733110978283/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:01,005 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e7b35cf4ab4e57 with lease ID 0x355c7f2daf47fd32: Processing first storage report for DS-e6f25c0d-84bb-4001-a116-33c4140682e1 from datanode DatanodeRegistration(127.0.0.1:36705, datanodeUuid=ace3bdc1-1fca-4a70-a64a-3486a129408c, infoPort=37283, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e7b35cf4ab4e57 with lease ID 0x355c7f2daf47fd32: from storage DS-e6f25c0d-84bb-4001-a116-33c4140682e1 node DatanodeRegistration(127.0.0.1:36705, datanodeUuid=ace3bdc1-1fca-4a70-a64a-3486a129408c, infoPort=37283, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ce59351b0f6fe0a with lease ID 0x355c7f2daf47fd31: Processing first storage report for DS-212d6a92-c2b5-4c10-83d0-74d32e944a07 from datanode DatanodeRegistration(127.0.0.1:37105, datanodeUuid=4259dd88-2ed2-4ea2-8ebe-a37dcf978cb6, infoPort=37455, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ce59351b0f6fe0a with lease ID 0x355c7f2daf47fd31: from storage DS-212d6a92-c2b5-4c10-83d0-74d32e944a07 node DatanodeRegistration(127.0.0.1:37105, datanodeUuid=4259dd88-2ed2-4ea2-8ebe-a37dcf978cb6, infoPort=37455, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e7b35cf4ab4e57 with lease ID 0x355c7f2daf47fd32: Processing first storage report for DS-df01b808-cf3b-4f4f-9761-33366b972c83 from datanode DatanodeRegistration(127.0.0.1:36705, datanodeUuid=ace3bdc1-1fca-4a70-a64a-3486a129408c, infoPort=37283, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e7b35cf4ab4e57 with lease ID 0x355c7f2daf47fd32: from storage DS-df01b808-cf3b-4f4f-9761-33366b972c83 node DatanodeRegistration(127.0.0.1:36705, datanodeUuid=ace3bdc1-1fca-4a70-a64a-3486a129408c, 
infoPort=37283, infoSecurePort=0, ipcPort=41387, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ce59351b0f6fe0a with lease ID 0x355c7f2daf47fd31: Processing first storage report for DS-ebbc8457-d50b-4e8f-b051-176e9b35b840 from datanode DatanodeRegistration(127.0.0.1:37105, datanodeUuid=4259dd88-2ed2-4ea2-8ebe-a37dcf978cb6, infoPort=37455, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ce59351b0f6fe0a with lease ID 0x355c7f2daf47fd31: from storage DS-ebbc8457-d50b-4e8f-b051-176e9b35b840 node DatanodeRegistration(127.0.0.1:37105, datanodeUuid=4259dd88-2ed2-4ea2-8ebe-a37dcf978cb6, infoPort=37455, infoSecurePort=0, ipcPort=41873, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,031 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T03:43:01,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdbb6c47acf48cfe6 with lease ID 0x355c7f2daf47fd33: Processing first storage report for DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5 from datanode DatanodeRegistration(127.0.0.1:46389, datanodeUuid=173fb258-e83f-477d-9e09-7a28ff063424, infoPort=36391, infoSecurePort=0, ipcPort=43801, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbb6c47acf48cfe6 with lease ID 0x355c7f2daf47fd33: from storage DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5 node DatanodeRegistration(127.0.0.1:46389, datanodeUuid=173fb258-e83f-477d-9e09-7a28ff063424, infoPort=36391, infoSecurePort=0, ipcPort=43801, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdbb6c47acf48cfe6 with lease ID 0x355c7f2daf47fd33: Processing first storage report for DS-88dfc881-01bd-4b86-8aa3-721a16ba3d1c from datanode DatanodeRegistration(127.0.0.1:46389, datanodeUuid=173fb258-e83f-477d-9e09-7a28ff063424, infoPort=36391, infoSecurePort=0, ipcPort=43801, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283) 2024-12-02T03:43:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbb6c47acf48cfe6 with lease ID 0x355c7f2daf47fd33: from storage DS-88dfc881-01bd-4b86-8aa3-721a16ba3d1c node DatanodeRegistration(127.0.0.1:46389, datanodeUuid=173fb258-e83f-477d-9e09-7a28ff063424, infoPort=36391, infoSecurePort=0, ipcPort=43801, storageInfo=lv=-57;cid=testClusterID;nsid=765338224;c=1733110978283), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:01,138 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb
2024-12-02T03:43:01,210 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-02T03:43:01,263 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=162, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=172, ProcessCount=11, AvailableMemoryMB=8323
2024-12-02T03:43:01,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-02T03:43:01,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-02T03:43:01,354 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/zookeeper_0, clientPort=51411, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-02T03:43:01,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51411
2024-12-02T03:43:01,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T03:43:01,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T03:43:01,459 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-02T03:43:01,459 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-02T03:43:01,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:45424 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45424 dst: /127.0.0.1:46389
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T03:43:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-02T03:43:01,915 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-02T03:43:01,929 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 with version=8 2024-12-02T03:43:01,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/hbase-staging 2024-12-02T03:43:02,005 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T03:43:02,245 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:02,253 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,258 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:02,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:02,377 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T03:43:02,426 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T03:43:02,433 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T03:43:02,436 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:02,456 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 17888 (auto-detected) 2024-12-02T03:43:02,456 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-02T03:43:02,471 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36013 2024-12-02T03:43:02,488 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36013 connecting to ZooKeeper ensemble=127.0.0.1:51411 2024-12-02T03:43:02,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360130x0, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:02,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36013-0x101956c759f0000 connected 2024-12-02T03:43:02,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,670 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:02,687 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777, hbase.cluster.distributed=false 2024-12-02T03:43:02,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:02,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36013 2024-12-02T03:43:02,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36013 2024-12-02T03:43:02,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36013 2024-12-02T03:43:02,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36013 2024-12-02T03:43:02,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36013 2024-12-02T03:43:02,812 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:02,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,814 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:02,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:02,817 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:02,819 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:02,819 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33091 2024-12-02T03:43:02,822 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33091 connecting to ZooKeeper ensemble=127.0.0.1:51411 2024-12-02T03:43:02,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,851 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330910x0, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:02,852 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33091-0x101956c759f0001 connected 2024-12-02T03:43:02,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:02,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:02,865 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:02,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:02,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:02,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33091 2024-12-02T03:43:02,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33091 2024-12-02T03:43:02,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33091 2024-12-02T03:43:02,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33091 2024-12-02T03:43:02,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33091 2024-12-02T03:43:02,894 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:02,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,896 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:02,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:02,896 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:02,896 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:02,897 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40787 2024-12-02T03:43:02,900 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40787 connecting to ZooKeeper ensemble=127.0.0.1:51411 2024-12-02T03:43:02,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,946 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407870x0, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:02,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40787-0x101956c759f0002 connected 2024-12-02T03:43:02,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:02,948 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:02,950 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:02,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:02,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:02,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40787 2024-12-02T03:43:02,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40787 2024-12-02T03:43:02,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40787 2024-12-02T03:43:02,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40787 2024-12-02T03:43:02,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40787 2024-12-02T03:43:02,974 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:02,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,975 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:02,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:02,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:02,976 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:02,976 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:02,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33293 2024-12-02T03:43:02,978 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33293 connecting to ZooKeeper ensemble=127.0.0.1:51411 2024-12-02T03:43:02,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:02,996 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332930x0, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:02,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33293-0x101956c759f0003 connected 2024-12-02T03:43:02,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:02,997 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:02,998 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:02,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:03,001 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:03,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33293 2024-12-02T03:43:03,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33293 2024-12-02T03:43:03,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33293 2024-12-02T03:43:03,006 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33293 2024-12-02T03:43:03,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33293 2024-12-02T03:43:03,030 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2eaa0f11f7e:36013 2024-12-02T03:43:03,031 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,047 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,047 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,047 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,049 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,079 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:03,079 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:03,079 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:03,079 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,079 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,079 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,080 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T03:43:03,082 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2eaa0f11f7e,36013,1733110982095 from backup master directory 2024-12-02T03:43:03,093 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,093 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,093 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:03,094 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T03:43:03,094 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,096 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T03:43:03,098 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T03:43:03,157 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/hbase.id] with ID: 99423827-3100-4523-8a00-e40c70e3d16a 2024-12-02T03:43:03,157 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/.tmp/hbase.id 2024-12-02T03:43:03,163 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,163 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:59492 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59492 dst: /127.0.0.1:46389 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T03:43:03,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-02T03:43:03,173 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:03,173 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/.tmp/hbase.id]:[hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/hbase.id] 2024-12-02T03:43:03,215 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:03,220 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T03:43:03,237 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-02T03:43:03,259 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,259 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,259 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,271 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,271 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,274 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:44076 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:37105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44076 dst: /127.0.0.1:37105 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:03,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-02T03:43:03,280 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:03,293 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T03:43:03,295 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T03:43:03,300 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T03:43:03,323 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,324 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,327 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:44104 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:37105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44104 dst: /127.0.0.1:37105 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:03,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-02T03:43:03,332 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-02T03:43:03,353 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store 2024-12-02T03:43:03,369 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,369 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:03,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55268 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55268 dst: /127.0.0.1:36705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:03,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-02T03:43:03,377 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:03,381 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-02T03:43:03,384 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:03,385 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T03:43:03,385 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:03,385 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:03,387 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T03:43:03,387 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:03,387 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T03:43:03,388 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733110983385Disabling compacts and flushes for region at 1733110983385Disabling writes for close at 1733110983387 (+2 ms)Writing region close event to WAL at 1733110983387Closed at 1733110983387 2024-12-02T03:43:03,390 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/.initializing 2024-12-02T03:43:03,390 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/WALs/e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,397 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T03:43:03,412 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C36013%2C1733110982095, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/WALs/e2eaa0f11f7e,36013,1733110982095, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/oldWALs, maxLogs=10 2024-12-02T03:43:03,441 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/WALs/e2eaa0f11f7e,36013,1733110982095/e2eaa0f11f7e%2C36013%2C1733110982095.1733110983418, exclude list is [], retry=0 2024-12-02T03:43:03,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:03,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36705,DS-e6f25c0d-84bb-4001-a116-33c4140682e1,DISK] 2024-12-02T03:43:03,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37105,DS-212d6a92-c2b5-4c10-83d0-74d32e944a07,DISK] 2024-12-02T03:43:03,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46389,DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5,DISK] 2024-12-02T03:43:03,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-02T03:43:03,494 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/WALs/e2eaa0f11f7e,36013,1733110982095/e2eaa0f11f7e%2C36013%2C1733110982095.1733110983418 2024-12-02T03:43:03,495 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:37455:37455),(127.0.0.1/127.0.0.1:36391:36391)] 2024-12-02T03:43:03,495 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:03,495 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:03,498 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,499 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T03:43:03,557 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:03,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:03,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T03:43:03,563 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:03,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:03,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T03:43:03,567 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:03,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:03,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T03:43:03,571 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:03,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:03,572 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,576 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,577 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,581 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,582 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,586 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T03:43:03,589 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:03,594 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:03,595 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59863525, jitterRate=-0.10796396434307098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:03,601 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733110983509Initializing all the Stores at 1733110983511 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110983512 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110983513 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110983513Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110983513Cleaning up temporary data from old regions at 1733110983582 (+69 ms)Region opened successfully at 1733110983600 (+18 ms) 2024-12-02T03:43:03,602 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T03:43:03,632 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521ab734, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:03,660 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T03:43:03,670 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T03:43:03,670 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T03:43:03,672 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T03:43:03,673 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T03:43:03,677 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-02T03:43:03,677 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T03:43:03,700 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T03:43:03,708 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T03:43:03,743 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T03:43:03,745 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T03:43:03,747 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T03:43:03,754 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T03:43:03,756 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T03:43:03,760 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T03:43:03,771 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T03:43:03,772 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T03:43:03,779 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T03:43:03,796 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T03:43:03,804 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T03:43:03,812 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:03,812 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:03,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:03,812 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:03,812 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,812 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,812 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,816 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2eaa0f11f7e,36013,1733110982095, sessionid=0x101956c759f0000, setting cluster-up flag (Was=false) 2024-12-02T03:43:03,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,846 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,846 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-02T03:43:03,846 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,876 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T03:43:03,881 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,905 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,905 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,905 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:03,929 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T03:43:03,931 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:03,939 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T03:43:04,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-02T03:43:04,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-02T03:43:04,005 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:04,014 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(746): ClusterId : 99423827-3100-4523-8a00-e40c70e3d16a 2024-12-02T03:43:04,014 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(746): ClusterId : 99423827-3100-4523-8a00-e40c70e3d16a 2024-12-02T03:43:04,014 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(746): ClusterId : 99423827-3100-4523-8a00-e40c70e3d16a 2024-12-02T03:43:04,015 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 
2024-12-02T03:43:04,016 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:04,016 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:04,016 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:04,021 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T03:43:04,026 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2eaa0f11f7e,36013,1733110982095 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T03:43:04,039 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:04,039 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:04,039 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:04,039 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:04,039 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:04,039 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:04,040 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:04,040 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:04,040 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:04,040 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:04,041 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2eaa0f11f7e:0, corePoolSize=10, maxPoolSize=10 2024-12-02T03:43:04,041 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,041 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:04,041 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,045 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733111014045 2024-12-02T03:43:04,047 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T03:43:04,047 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:04,047 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T03:43:04,048 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T03:43:04,051 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T03:43:04,051 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T03:43:04,052 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T03:43:04,052 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T03:43:04,052 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:04,052 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:04,052 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:04,053 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d353c0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:04,053 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a51ef54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:04,053 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@164ddedb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:04,053 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,054 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T03:43:04,053 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,056 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T03:43:04,058 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T03:43:04,058 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T03:43:04,065 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T03:43:04,066 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T03:43:04,067 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:04,068 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;e2eaa0f11f7e:33293 2024-12-02T03:43:04,068 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:04,071 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:04,071 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:04,071 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T03:43:04,071 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110984067,5,FailOnTimeoutGroup] 2024-12-02T03:43:04,072 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2eaa0f11f7e:33091 2024-12-02T03:43:04,072 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e2eaa0f11f7e:40787 2024-12-02T03:43:04,072 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:04,072 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:04,072 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:04,072 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:04,072 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T03:43:04,072 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T03:43:04,073 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110984071,5,FailOnTimeoutGroup] 2024-12-02T03:43:04,073 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,073 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-02T03:43:04,074 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,36013,1733110982095 with port=40787, startcode=1733110982894 2024-12-02T03:43:04,074 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,36013,1733110982095 with port=33293, startcode=1733110982974 2024-12-02T03:43:04,074 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,36013,1733110982095 with port=33091, startcode=1733110982783 2024-12-02T03:43:04,074 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,075 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,078 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55294 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55294 dst: /127.0.0.1:36705
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T03:43:04,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-02T03:43:04,085 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-02T03:43:04,086 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:04,086 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:04,086 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:04,087 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T03:43:04,087 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 2024-12-02T03:43:04,099 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:04,100 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:04,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55316 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55316 dst: /127.0.0.1:36705
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T03:43:04,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-02T03:43:04,120 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-02T03:43:04,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:04,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T03:43:04,128 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T03:43:04,128 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,130 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43193, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:04,130 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55501, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:04,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T03:43:04,130 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54491, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:04,132 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 
columnFamilyName ns 2024-12-02T03:43:04,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T03:43:04,135 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T03:43:04,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T03:43:04,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T03:43:04,143 
DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T03:43:04,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740 2024-12-02T03:43:04,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740 2024-12-02T03:43:04,149 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,149 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T03:43:04,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T03:43:04,152 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-02T03:43:04,152 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,153 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36013 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,153 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 2024-12-02T03:43:04,153 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 2024-12-02T03:43:04,153 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40287 2024-12-02T03:43:04,153 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40287 2024-12-02T03:43:04,153 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:04,153 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:04,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T03:43:04,157 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 2024-12-02T03:43:04,157 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40287 2024-12-02T03:43:04,157 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:04,164 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:04,165 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64093513, jitterRate=-0.044932231307029724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:04,168 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733110984121Initializing all the Stores at 1733110984124 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984124Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984124Instantiating store 
for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110984125 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984125Cleaning up temporary data from old regions at 1733110984151 (+26 ms)Region opened successfully at 1733110984168 (+17 ms) 2024-12-02T03:43:04,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T03:43:04,169 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T03:43:04,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T03:43:04,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T03:43:04,169 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T03:43:04,171 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:04,171 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733110984169Disabling compacts and flushes for region at 1733110984169Disabling writes for close at 1733110984169Writing region close event to WAL at 1733110984170 (+1 ms)Closed at 1733110984171 (+1 ms) 2024-12-02T03:43:04,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:04,174 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T03:43:04,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T03:43:04,189 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T03:43:04,193 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T03:43:04,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:04,226 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] zookeeper.ZKUtil(111): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,226 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] zookeeper.ZKUtil(111): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,226 WARN [RS:0;e2eaa0f11f7e:33091 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T03:43:04,226 WARN [RS:2;e2eaa0f11f7e:33293 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T03:43:04,227 INFO [RS:0;e2eaa0f11f7e:33091 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T03:43:04,227 INFO [RS:2;e2eaa0f11f7e:33293 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T03:43:04,227 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,227 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,228 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] zookeeper.ZKUtil(111): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,228 WARN [RS:1;e2eaa0f11f7e:40787 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T03:43:04,228 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,33293,1733110982974] 2024-12-02T03:43:04,229 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,33091,1733110982783] 2024-12-02T03:43:04,229 INFO [RS:1;e2eaa0f11f7e:40787 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T03:43:04,229 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,40787,1733110982894] 2024-12-02T03:43:04,229 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,255 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:04,255 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:04,255 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:04,267 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:04,267 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:04,267 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:04,273 INFO [RS:1;e2eaa0f11f7e:40787 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:04,273 INFO [RS:0;e2eaa0f11f7e:33091 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:04,273 INFO [RS:2;e2eaa0f11f7e:33293 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:04,273 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,273 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,273 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:04,275 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:04,275 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:04,275 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:04,282 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:04,282 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:04,282 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:04,284 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,284 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,284 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,284 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,284 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,284 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,284 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,284 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,284 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 
2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,285 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:04,285 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:04,286 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,286 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,33293,1733110982974-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:04,291 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,33091,1733110982783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:04,294 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,295 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,295 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,295 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,295 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,295 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,40787,1733110982894-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:04,323 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:04,323 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:04,323 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:04,325 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,33091,1733110982783-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,325 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,33293,1733110982974-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,325 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,40787,1733110982894-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:04,325 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,325 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,325 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,325 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.Replication(171): e2eaa0f11f7e,40787,1733110982894 started 2024-12-02T03:43:04,325 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.Replication(171): e2eaa0f11f7e,33091,1733110982783 started 2024-12-02T03:43:04,325 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.Replication(171): e2eaa0f11f7e,33293,1733110982974 started 2024-12-02T03:43:04,343 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,343 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,33293,1733110982974, RpcServer on e2eaa0f11f7e/172.17.0.2:33293, sessionid=0x101956c759f0003 2024-12-02T03:43:04,343 WARN [e2eaa0f11f7e:36013 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T03:43:04,344 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:04,344 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,344 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,33293,1733110982974' 2024-12-02T03:43:04,344 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:04,345 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,33293,1733110982974' 2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:04,346 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:04,346 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:04,346 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:04,346 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,33091,1733110982783, RpcServer on e2eaa0f11f7e/172.17.0.2:33091, sessionid=0x101956c759f0001 2024-12-02T03:43:04,346 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,40787,1733110982894, RpcServer on e2eaa0f11f7e/172.17.0.2:40787, sessionid=0x101956c759f0002 2024-12-02T03:43:04,347 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:04,347 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:04,347 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,347 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,347 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,33091,1733110982783' 2024-12-02T03:43:04,347 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,40787,1733110982894' 2024-12-02T03:43:04,347 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:04,347 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:04,347 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:04,347 INFO [RS:2;e2eaa0f11f7e:33293 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:04,347 INFO [RS:2;e2eaa0f11f7e:33293 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,40787,1733110982894' 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:04,348 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,33091,1733110982783' 2024-12-02T03:43:04,348 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:04,349 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:04,349 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:04,349 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:04,349 INFO [RS:1;e2eaa0f11f7e:40787 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:04,349 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:04,349 INFO [RS:1;e2eaa0f11f7e:40787 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T03:43:04,349 INFO [RS:0;e2eaa0f11f7e:33091 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:04,349 INFO [RS:0;e2eaa0f11f7e:33091 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-02T03:43:04,459 INFO [RS:2;e2eaa0f11f7e:33293 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T03:43:04,459 INFO [RS:0;e2eaa0f11f7e:33091 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T03:43:04,459 INFO [RS:1;e2eaa0f11f7e:40787 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T03:43:04,461 INFO [RS:2;e2eaa0f11f7e:33293 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C33293%2C1733110982974, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33293,1733110982974, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs, maxLogs=32 2024-12-02T03:43:04,461 INFO [RS:0;e2eaa0f11f7e:33091 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C33091%2C1733110982783, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33091,1733110982783, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs, maxLogs=32 2024-12-02T03:43:04,461 INFO [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C40787%2C1733110982894, suffix=, logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs, maxLogs=32 2024-12-02T03:43:04,476 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33293,1733110982974/e2eaa0f11f7e%2C33293%2C1733110982974.1733110984464, exclude list is [], retry=0 2024-12-02T03:43:04,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37105,DS-212d6a92-c2b5-4c10-83d0-74d32e944a07,DISK] 2024-12-02T03:43:04,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36705,DS-e6f25c0d-84bb-4001-a116-33c4140682e1,DISK] 2024-12-02T03:43:04,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46389,DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5,DISK] 2024-12-02T03:43:04,483 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894/e2eaa0f11f7e%2C40787%2C1733110982894.1733110984464, exclude list is [], retry=0 2024-12-02T03:43:04,483 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33091,1733110982783/e2eaa0f11f7e%2C33091%2C1733110982783.1733110984464, exclude list is [], retry=0 2024-12-02T03:43:04,492 INFO [RS:2;e2eaa0f11f7e:33293 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33293,1733110982974/e2eaa0f11f7e%2C33293%2C1733110982974.1733110984464 2024-12-02T03:43:04,493 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:37455:37455),(127.0.0.1/127.0.0.1:37283:37283)] 2024-12-02T03:43:04,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46389,DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5,DISK] 2024-12-02T03:43:04,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46389,DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5,DISK] 2024-12-02T03:43:04,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36705,DS-e6f25c0d-84bb-4001-a116-33c4140682e1,DISK] 2024-12-02T03:43:04,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37105,DS-212d6a92-c2b5-4c10-83d0-74d32e944a07,DISK] 2024-12-02T03:43:04,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37105,DS-212d6a92-c2b5-4c10-83d0-74d32e944a07,DISK] 2024-12-02T03:43:04,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36705,DS-e6f25c0d-84bb-4001-a116-33c4140682e1,DISK] 2024-12-02T03:43:04,534 INFO [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894/e2eaa0f11f7e%2C40787%2C1733110982894.1733110984464 2024-12-02T03:43:04,535 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:37455:37455),(127.0.0.1/127.0.0.1:37283:37283)] 2024-12-02T03:43:04,535 INFO [RS:0;e2eaa0f11f7e:33091 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33091,1733110982783/e2eaa0f11f7e%2C33091%2C1733110982783.1733110984464 2024-12-02T03:43:04,536 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:37455:37455),(127.0.0.1/127.0.0.1:37283:37283)] 
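Each of the three region servers has now rolled its first WAL under the shared test-data directory, and the pipeline lines show a three-node write pipeline, one entry per datanode in this mini cluster. Purely as a sketch (hdfs://localhost:40287 and the a9ef76ae-... directory are temporary to this run), the files created by the entries above could be inspected with the standard HDFS CLI:

    # list the per-regionserver WAL directories created by the log entries above
    hdfs dfs -ls hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/
    # e.g. the WAL just rolled by the region server on port 33293
    hdfs dfs -ls hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,33293,1733110982974/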
2024-12-02T03:43:04,597 DEBUG [e2eaa0f11f7e:36013 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T03:43:04,607 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T03:43:04,615 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T03:43:04,615 INFO [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T03:43:04,615 INFO [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T03:43:04,615 INFO [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T03:43:04,616 DEBUG [e2eaa0f11f7e:36013 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T03:43:04,621 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,627 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,40787,1733110982894, state=OPENING 2024-12-02T03:43:04,676 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T03:43:04,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:04,688 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:04,688 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:04,689 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:04,691 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:04,691 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:04,691 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:04,691 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:04,695 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T03:43:04,697 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,40787,1733110982894}] 2024-12-02T03:43:04,877 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T03:43:04,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55951, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T03:43:04,891 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T03:43:04,892 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T03:43:04,892 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T03:43:04,896 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C40787%2C1733110982894.meta, suffix=.meta, logDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894, archiveDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs, maxLogs=32 2024-12-02T03:43:04,912 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894/e2eaa0f11f7e%2C40787%2C1733110982894.meta.1733110984898.meta, exclude list is [], retry=0 2024-12-02T03:43:04,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46389,DS-aecf774a-d3c4-4b4f-8001-aa6792c6d4f5,DISK] 2024-12-02T03:43:04,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36705,DS-e6f25c0d-84bb-4001-a116-33c4140682e1,DISK] 2024-12-02T03:43:04,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37105,DS-212d6a92-c2b5-4c10-83d0-74d32e944a07,DISK] 2024-12-02T03:43:04,920 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/WALs/e2eaa0f11f7e,40787,1733110982894/e2eaa0f11f7e%2C40787%2C1733110982894.meta.1733110984898.meta 2024-12-02T03:43:04,921 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:37455:37455)] 2024-12-02T03:43:04,921 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:04,923 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T03:43:04,925 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T03:43:04,929 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T03:43:04,934 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T03:43:04,934 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:04,935 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T03:43:04,935 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T03:43:04,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T03:43:04,939 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T03:43:04,939 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T03:43:04,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T03:43:04,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T03:43:04,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T03:43:04,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T03:43:04,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T03:43:04,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:04,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:04,949 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T03:43:04,951 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740 2024-12-02T03:43:04,954 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740 2024-12-02T03:43:04,957 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T03:43:04,957 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T03:43:04,958 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
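The 32.0 M fallback above is simple arithmetic: the region's memstore flush heap size of 134,217,728 bytes (128 MB) divided by hbase:meta's four column families (info, ns, rep_barrier and table, instantiated above) gives 134,217,728 / 4 = 33,554,432 bytes, i.e. 32 MB, which matches the flushSizeLowerBound=33554432 reported when the region finishes opening in the next entries.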
2024-12-02T03:43:04,960 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T03:43:04,961 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67025970, jitterRate=-0.0012352168560028076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:04,962 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T03:43:04,964 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733110984935Writing region info on filesystem at 1733110984935Initializing all the Stores at 1733110984937 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984937Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984938 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110984938Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110984938Cleaning up temporary data from old regions at 1733110984957 (+19 ms)Running coprocessor post-open hooks at 1733110984962 (+5 ms)Region opened successfully at 1733110984964 (+2 ms) 2024-12-02T03:43:04,973 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733110984871 2024-12-02T03:43:04,984 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T03:43:04,984 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T03:43:04,986 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:04,988 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,40787,1733110982894, state=OPEN 2024-12-02T03:43:05,018 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:05,017 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:05,017 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:05,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:05,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:05,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:05,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:05,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:05,019 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:05,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T03:43:05,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,40787,1733110982894 in 322 msec 2024-12-02T03:43:05,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T03:43:05,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 849 msec 2024-12-02T03:43:05,035 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:05,036 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T03:43:05,053 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T03:43:05,054 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=e2eaa0f11f7e,40787,1733110982894, seqNum=-1] 2024-12-02T03:43:05,072 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T03:43:05,074 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T03:43:05,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1470 sec 2024-12-02T03:43:05,116 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733110985116, completionTime=-1 2024-12-02T03:43:05,120 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T03:43:05,120 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T03:43:05,148 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T03:43:05,148 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733111045148 2024-12-02T03:43:05,148 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733111105148 2024-12-02T03:43:05,148 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-02T03:43:05,150 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-02T03:43:05,158 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:05,158 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:05,158 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:05,160 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2eaa0f11f7e:36013, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:05,160 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:05,161 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:05,165 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T03:43:05,187 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.092sec 2024-12-02T03:43:05,188 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T03:43:05,190 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T03:43:05,190 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T03:43:05,191 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T03:43:05,191 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T03:43:05,191 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:05,192 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T03:43:05,196 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T03:43:05,197 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T03:43:05,197 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36013,1733110982095-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:05,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296587b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:05,227 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T03:43:05,227 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T03:43:05,232 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,36013,-1 for getting cluster id 2024-12-02T03:43:05,234 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T03:43:05,241 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '99423827-3100-4523-8a00-e40c70e3d16a' 2024-12-02T03:43:05,243 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T03:43:05,243 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "99423827-3100-4523-8a00-e40c70e3d16a" 2024-12-02T03:43:05,245 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63892f90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:05,245 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,36013,-1] 2024-12-02T03:43:05,248 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T03:43:05,250 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:05,251 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T03:43:05,253 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c738163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:05,254 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T03:43:05,261 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,40787,1733110982894, seqNum=-1] 2024-12-02T03:43:05,262 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T03:43:05,264 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T03:43:05,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:05,288 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T03:43:05,291 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:05,293 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68b89ab3 2024-12-02T03:43:05,294 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T03:43:05,297 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T03:43:05,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T03:43:05,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-02T03:43:05,314 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T03:43:05,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-02T03:43:05,316 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:05,319 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T03:43:05,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:05,328 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:05,328 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
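The WARN entries just above come from writing the new table's filesystem layout under the RS-3-2-1024k erasure coding policy on a mini cluster with only three datanodes: that policy stripes each block group into 3 data and 2 parity blocks, so full placement needs at least 5 datanodes, and the parity blocks at indices 3 and 4 therefore cannot be allocated, which is also why the 'Block group <1> failed to write 2 blocks' warnings follow below. As the message itself suggests, the erasure coding setup can be checked from the HDFS CLI; a minimal sketch, assuming shell access to this cluster (the test-data path is this run's temporary directory):

    # quoted in the warning above: confirm the enabled EC policies fit the cluster topology
    hdfs ec -verifyClusterSetup
    # list enabled policies and show which policy is set on the test data directory
    hdfs ec -listPolicies
    hdfs ec -getPolicy -path /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777

In this test the shortfall is expected; on a real cluster the remedy would be to add datanodes or switch the directory to a policy the topology supports (for example via hdfs ec -setPolicy).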
2024-12-02T03:43:05,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55390 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55390 dst: /127.0.0.1:36705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:05,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-02T03:43:05,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:05,743 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:05,748 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4c19cb1722a3797a9c03f9ec373f9549, NAME => 'TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777 2024-12-02T03:43:05,756 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:05,756 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:05,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55400 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55400 dst: /127.0.0.1:36705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:05,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-02T03:43:05,763 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:05,763 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:05,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4c19cb1722a3797a9c03f9ec373f9549, disabling compactions & flushes 2024-12-02T03:43:05,764 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:05,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:05,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
after waiting 0 ms 2024-12-02T03:43:05,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:05,764 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:05,764 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4c19cb1722a3797a9c03f9ec373f9549: Waiting for close lock at 1733110985764Disabling compacts and flushes for region at 1733110985764Disabling writes for close at 1733110985764Writing region close event to WAL at 1733110985764Closed at 1733110985764 2024-12-02T03:43:05,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T03:43:05,770 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733110985766"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733110985766"}]},"ts":"1733110985766"} 2024-12-02T03:43:05,774 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T03:43:05,776 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T03:43:05,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733110985776"}]},"ts":"1733110985776"} 2024-12-02T03:43:05,783 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-02T03:43:05,784 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T03:43:05,785 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T03:43:05,785 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T03:43:05,785 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T03:43:05,785 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T03:43:05,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, 
region=4c19cb1722a3797a9c03f9ec373f9549, ASSIGN}] 2024-12-02T03:43:05,789 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c19cb1722a3797a9c03f9ec373f9549, ASSIGN 2024-12-02T03:43:05,791 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c19cb1722a3797a9c03f9ec373f9549, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,33293,1733110982974; forceNewPlan=false, retain=false 2024-12-02T03:43:05,945 INFO [e2eaa0f11f7e:36013 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T03:43:05,946 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c19cb1722a3797a9c03f9ec373f9549, regionState=OPENING, regionLocation=e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:05,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c19cb1722a3797a9c03f9ec373f9549, ASSIGN because future has completed 2024-12-02T03:43:05,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c19cb1722a3797a9c03f9ec373f9549, server=e2eaa0f11f7e,33293,1733110982974}] 2024-12-02T03:43:06,109 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T03:43:06,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T03:43:06,123 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
2024-12-02T03:43:06,124 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c19cb1722a3797a9c03f9ec373f9549, NAME => 'TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:06,124 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,124 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:06,125 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,125 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,127 INFO [StoreOpener-4c19cb1722a3797a9c03f9ec373f9549-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,130 INFO [StoreOpener-4c19cb1722a3797a9c03f9ec373f9549-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c19cb1722a3797a9c03f9ec373f9549 columnFamilyName cf 2024-12-02T03:43:06,130 DEBUG [StoreOpener-4c19cb1722a3797a9c03f9ec373f9549-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:06,131 INFO [StoreOpener-4c19cb1722a3797a9c03f9ec373f9549-1 {}] regionserver.HStore(327): Store=4c19cb1722a3797a9c03f9ec373f9549/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:06,131 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,132 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,133 DEBUG 
[RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,133 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,133 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,136 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,142 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:06,143 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4c19cb1722a3797a9c03f9ec373f9549; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73108951, jitterRate=0.0894082635641098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T03:43:06,143 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,144 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4c19cb1722a3797a9c03f9ec373f9549: Running coprocessor pre-open hook at 1733110986125Writing region info on filesystem at 1733110986125Initializing all the Stores at 1733110986127 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110986127Cleaning up temporary data from old regions at 1733110986133 (+6 ms)Running coprocessor post-open hooks at 1733110986143 (+10 ms)Region opened successfully at 1733110986144 (+1 ms) 2024-12-02T03:43:06,146 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549., pid=6, masterSystemTime=1733110986108 2024-12-02T03:43:06,149 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:06,149 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
2024-12-02T03:43:06,150 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c19cb1722a3797a9c03f9ec373f9549, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:06,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c19cb1722a3797a9c03f9ec373f9549, server=e2eaa0f11f7e,33293,1733110982974 because future has completed 2024-12-02T03:43:06,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T03:43:06,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4c19cb1722a3797a9c03f9ec373f9549, server=e2eaa0f11f7e,33293,1733110982974 in 204 msec 2024-12-02T03:43:06,165 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T03:43:06,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c19cb1722a3797a9c03f9ec373f9549, ASSIGN in 374 msec 2024-12-02T03:43:06,167 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T03:43:06,167 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733110986167"}]},"ts":"1733110986167"} 2024-12-02T03:43:06,170 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-02T03:43:06,172 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T03:43:06,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 867 msec 2024-12-02T03:43:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:06,462 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-02T03:43:06,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-02T03:43:06,464 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T03:43:06,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-02T03:43:06,474 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T03:43:06,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-02T03:43:06,484 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549., hostname=e2eaa0f11f7e,33293,1733110982974, seqNum=2] 2024-12-02T03:43:06,486 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T03:43:06,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T03:43:06,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-02T03:43:06,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-02T03:43:06,502 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-02T03:43:06,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:06,504 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T03:43:06,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T03:43:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:06,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-02T03:43:06,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
2024-12-02T03:43:06,678 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4c19cb1722a3797a9c03f9ec373f9549 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-02T03:43:06,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/.tmp/cf/25d81b3001d44fff8d112d43f39efb79 is 36, key is row/cf:cq/1733110986489/Put/seqid=0 2024-12-02T03:43:06,734 WARN [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,734 WARN [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1746425157_22 at /127.0.0.1:44162 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44162 dst: /127.0.0.1:37105 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:06,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-02T03:43:06,743 WARN [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-02T03:43:06,743 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/.tmp/cf/25d81b3001d44fff8d112d43f39efb79 2024-12-02T03:43:06,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/.tmp/cf/25d81b3001d44fff8d112d43f39efb79 as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/cf/25d81b3001d44fff8d112d43f39efb79 2024-12-02T03:43:06,796 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/cf/25d81b3001d44fff8d112d43f39efb79, entries=1, sequenceid=5, filesize=4.7 K 2024-12-02T03:43:06,803 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4c19cb1722a3797a9c03f9ec373f9549 in 125ms, sequenceid=5, compaction requested=false 2024-12-02T03:43:06,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-02T03:43:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4c19cb1722a3797a9c03f9ec373f9549: 2024-12-02T03:43:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
2024-12-02T03:43:06,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-02T03:43:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-02T03:43:06,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T03:43:06,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 306 msec 2024-12-02T03:43:06,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 318 msec 2024-12-02T03:43:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36013 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:06,819 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-02T03:43:06,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T03:43:06,832 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T03:43:06,832 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:06,835 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,836 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T03:43:06,836 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T03:43:06,836 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=764753395, stopped=false 2024-12-02T03:43:06,837 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2eaa0f11f7e,36013,1733110982095 2024-12-02T03:43:06,879 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:06,879 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:06,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:06,879 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:06,879 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:06,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, 
quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:06,880 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:06,880 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:06,880 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T03:43:06,881 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T03:43:06,881 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:06,881 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:06,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:06,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:06,882 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:06,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,882 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(878): Closing user regions 2024-12-02T03:43:06,883 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(3091): Received CLOSE for 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,33091,1733110982783' ***** 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,40787,1733110982894' ***** 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,33293,1733110982974' ***** 2024-12-02T03:43:06,884 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:06,884 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:06,884 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:06,885 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:06,885 INFO [MemStoreFlusher.0 {}] 
regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:06,885 INFO [RS:1;e2eaa0f11f7e:40787 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:06,885 INFO [RS:0;e2eaa0f11f7e:33091 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:06,885 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4c19cb1722a3797a9c03f9ec373f9549, disabling compactions & flushes 2024-12-02T03:43:06,885 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:06,886 INFO [RS:1;e2eaa0f11f7e:40787 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T03:43:06,886 INFO [RS:0;e2eaa0f11f7e:33091 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T03:43:06,886 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:06,886 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:06,886 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:06,886 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. after waiting 0 ms 2024-12-02T03:43:06,886 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:06,886 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:06,886 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:06,886 INFO [RS:1;e2eaa0f11f7e:40787 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e2eaa0f11f7e:40787. 2024-12-02T03:43:06,886 INFO [RS:0;e2eaa0f11f7e:33091 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2eaa0f11f7e:33091. 
2024-12-02T03:43:06,886 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:06,886 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,886 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:06,886 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,887 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:06,887 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:06,887 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T03:43:06,887 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,33091,1733110982783; all regions closed. 2024-12-02T03:43:06,887 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T03:43:06,888 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T03:43:06,888 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T03:43:06,888 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T03:43:06,888 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T03:43:06,888 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T03:43:06,888 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T03:43:06,888 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T03:43:06,889 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T03:43:06,889 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;e2eaa0f11f7e:33293. 
2024-12-02T03:43:06,891 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:06,891 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,891 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T03:43:06,892 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1325): Online Regions={4c19cb1722a3797a9c03f9ec373f9549=TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549.} 2024-12-02T03:43:06,892 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:06,892 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1351): Waiting on 4c19cb1722a3797a9c03f9ec373f9549 2024-12-02T03:43:06,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741827_1017 (size=93) 2024-12-02T03:43:06,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_1073741827_1017 (size=93) 2024-12-02T03:43:06,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_1073741827_1017 (size=93) 2024-12-02T03:43:06,899 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/default/TestHBaseWalOnEC/4c19cb1722a3797a9c03f9ec373f9549/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T03:43:06,901 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs 2024-12-02T03:43:06,901 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:06,901 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:06,901 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:06,901 INFO 
[RS:0;e2eaa0f11f7e:33091 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C33091%2C1733110982783:(num 1733110984464) 2024-12-02T03:43:06,901 DEBUG [RS:0;e2eaa0f11f7e:33091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:06,901 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:06,901 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:06,902 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:06,902 INFO [RS:0;e2eaa0f11f7e:33091 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33091 2024-12-02T03:43:06,903 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 2024-12-02T03:43:06,903 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4c19cb1722a3797a9c03f9ec373f9549: Waiting for close lock at 1733110986885Running coprocessor pre-close hooks at 1733110986885Disabling compacts and flushes for region at 1733110986885Disabling writes for close at 1733110986886 (+1 ms)Writing region close event to WAL at 1733110986888 (+2 ms)Running coprocessor post-close hooks at 1733110986900 (+12 ms)Closed at 1733110986903 (+3 ms) 2024-12-02T03:43:06,904 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549. 
2024-12-02T03:43:06,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:06,912 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,33091,1733110982783 2024-12-02T03:43:06,912 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:06,920 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,33091,1733110982783] 2024-12-02T03:43:06,923 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/info/fed7ebea64014d17a270faf18b511d5a is 153, key is TestHBaseWalOnEC,,1733110985298.4c19cb1722a3797a9c03f9ec373f9549./info:regioninfo/1733110986150/Put/seqid=0 2024-12-02T03:43:06,926 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,926 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,928 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,33091,1733110982783 already deleted, retry=false 2024-12-02T03:43:06,929 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,33091,1733110982783 expired; onlineServers=2 2024-12-02T03:43:06,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-697128772_22 at /127.0.0.1:59556 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59556 dst: /127.0.0.1:46389 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:06,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-02T03:43:06,933 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:06,934 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/info/fed7ebea64014d17a270faf18b511d5a 2024-12-02T03:43:06,959 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/ns/aec166e8d8c34f14bdd9ba14b739fc9b is 43, key is default/ns:d/1733110985096/Put/seqid=0 2024-12-02T03:43:06,961 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,961 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:06,964 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-697128772_22 at /127.0.0.1:55430 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55430 dst: /127.0.0.1:36705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:06,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-02T03:43:06,969 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:06,969 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/ns/aec166e8d8c34f14bdd9ba14b739fc9b 2024-12-02T03:43:06,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-02T03:43:06,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-02T03:43:06,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-02T03:43:06,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-02T03:43:06,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-02T03:43:06,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-02T03:43:07,001 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/table/165c1925ac704e1f9ee4823c32acde07 is 52, key is TestHBaseWalOnEC/table:state/1733110986167/Put/seqid=0 2024-12-02T03:43:07,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-02T03:43:07,003 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:07,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-02T03:43:07,003 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-697128772_22 at /127.0.0.1:44220 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44220 dst: /127.0.0.1:37105 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:07,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-02T03:43:07,013 WARN [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-02T03:43:07,013 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/table/165c1925ac704e1f9ee4823c32acde07 2024-12-02T03:43:07,021 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,021 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33091-0x101956c759f0001, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,021 INFO [RS:0;e2eaa0f11f7e:33091 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:07,021 INFO [RS:0;e2eaa0f11f7e:33091 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,33091,1733110982783; zookeeper connection closed. 2024-12-02T03:43:07,022 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@733f5b2d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@733f5b2d 2024-12-02T03:43:07,024 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/info/fed7ebea64014d17a270faf18b511d5a as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/info/fed7ebea64014d17a270faf18b511d5a 2024-12-02T03:43:07,033 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/info/fed7ebea64014d17a270faf18b511d5a, entries=10, sequenceid=11, filesize=6.5 K 2024-12-02T03:43:07,035 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/ns/aec166e8d8c34f14bdd9ba14b739fc9b as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/ns/aec166e8d8c34f14bdd9ba14b739fc9b 2024-12-02T03:43:07,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-02T03:43:07,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-02T03:43:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-02T03:43:07,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-02T03:43:07,047 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/ns/aec166e8d8c34f14bdd9ba14b739fc9b, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T03:43:07,049 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/.tmp/table/165c1925ac704e1f9ee4823c32acde07 as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/table/165c1925ac704e1f9ee4823c32acde07 2024-12-02T03:43:07,058 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/table/165c1925ac704e1f9ee4823c32acde07, entries=2, sequenceid=11, filesize=5.1 K 2024-12-02T03:43:07,059 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 171ms, sequenceid=11, compaction requested=false 2024-12-02T03:43:07,059 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T03:43:07,067 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T03:43:07,068 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T03:43:07,068 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:07,068 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733110986888Running coprocessor pre-close hooks at 1733110986888Disabling compacts and flushes for region at 1733110986888Disabling writes for close at 1733110986888Obtaining lock to block concurrent updates at 1733110986889 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733110986889Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733110986890 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733110986891 (+1 ms)Flushing 1588230740/info: creating writer at 1733110986892 (+1 ms)Flushing 1588230740/info: appending metadata at 1733110986919 (+27 ms)Flushing 1588230740/info: closing flushed file at 1733110986919Flushing 1588230740/ns: creating writer at 1733110986943 (+24 ms)Flushing 1588230740/ns: appending metadata at 1733110986958 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733110986958Flushing 1588230740/table: creating writer at 1733110986977 (+19 ms)Flushing 1588230740/table: appending metadata at 1733110987000 (+23 ms)Flushing 1588230740/table: closing flushed file at 1733110987000Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cd83cd8: reopening flushed file at 1733110987022 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66bc2949: reopening flushed file at 1733110987033 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14eb5020: reopening flushed file at 1733110987047 (+14 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 171ms, sequenceid=11, compaction requested=false at 1733110987059 (+12 ms)Writing region close event to WAL at 1733110987061 (+2 ms)Running coprocessor post-close hooks at 1733110987068 (+7 ms)Closed at 1733110987068 2024-12-02T03:43:07,069 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:07,089 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,40787,1733110982894; all regions closed. 2024-12-02T03:43:07,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_1073741829_1019 (size=2751) 2024-12-02T03:43:07,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741829_1019 (size=2751) 2024-12-02T03:43:07,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_1073741829_1019 (size=2751) 2024-12-02T03:43:07,092 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,33293,1733110982974; all regions closed. 2024-12-02T03:43:07,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_1073741826_1016 (size=1298) 2024-12-02T03:43:07,097 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs 2024-12-02T03:43:07,097 INFO [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C40787%2C1733110982894.meta:.meta(num 1733110984898) 2024-12-02T03:43:07,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_1073741826_1016 (size=1298) 2024-12-02T03:43:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741826_1016 (size=1298) 2024-12-02T03:43:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_1073741828_1018 (size=93) 2024-12-02T03:43:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741828_1018 (size=93) 2024-12-02T03:43:07,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_1073741828_1018 (size=93) 2024-12-02T03:43:07,102 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs 2024-12-02T03:43:07,102 INFO [RS:2;e2eaa0f11f7e:33293 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C33293%2C1733110982974:(num 1733110984464) 2024-12-02T03:43:07,102 DEBUG [RS:2;e2eaa0f11f7e:33293 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-12-02T03:43:07,102 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:07,102 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:07,102 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:07,103 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:07,103 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:07,103 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:07,103 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T03:43:07,103 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:07,103 INFO [RS:2;e2eaa0f11f7e:33293 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33293 2024-12-02T03:43:07,104 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/oldWALs 2024-12-02T03:43:07,104 INFO [RS:1;e2eaa0f11f7e:40787 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL e2eaa0f11f7e%2C40787%2C1733110982894:(num 1733110984464) 2024-12-02T03:43:07,104 DEBUG [RS:1;e2eaa0f11f7e:40787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:07,104 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:07,104 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:07,105 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:07,105 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:07,105 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T03:43:07,105 INFO [RS:1;e2eaa0f11f7e:40787 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40787 2024-12-02T03:43:07,117 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,33293,1733110982974 2024-12-02T03:43:07,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:07,117 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:07,125 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,40787,1733110982894 2024-12-02T03:43:07,125 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:07,133 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,33293,1733110982974] 2024-12-02T03:43:07,150 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,33293,1733110982974 already deleted, retry=false 2024-12-02T03:43:07,150 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,33293,1733110982974 expired; onlineServers=1 2024-12-02T03:43:07,150 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,40787,1733110982894] 2024-12-02T03:43:07,162 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,40787,1733110982894 already deleted, retry=false 2024-12-02T03:43:07,162 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,40787,1733110982894 expired; onlineServers=0 2024-12-02T03:43:07,162 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2eaa0f11f7e,36013,1733110982095' ***** 2024-12-02T03:43:07,162 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T03:43:07,162 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:07,162 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:07,163 DEBUG [M:0;e2eaa0f11f7e:36013 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T03:43:07,163 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T03:43:07,163 DEBUG [M:0;e2eaa0f11f7e:36013 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T03:43:07,163 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110984067 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110984067,5,FailOnTimeoutGroup] 2024-12-02T03:43:07,163 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110984071 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110984071,5,FailOnTimeoutGroup] 2024-12-02T03:43:07,163 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.ChoreService(370): Chore service for: master/e2eaa0f11f7e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:07,163 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:07,164 DEBUG [M:0;e2eaa0f11f7e:36013 {}] master.HMaster(1795): Stopping service threads 2024-12-02T03:43:07,164 INFO [M:0;e2eaa0f11f7e:36013 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T03:43:07,164 INFO [M:0;e2eaa0f11f7e:36013 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T03:43:07,165 INFO [M:0;e2eaa0f11f7e:36013 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T03:43:07,165 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T03:43:07,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:07,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:07,171 DEBUG [M:0;e2eaa0f11f7e:36013 {}] zookeeper.ZKUtil(347): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T03:43:07,171 WARN [M:0;e2eaa0f11f7e:36013 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T03:43:07,172 INFO [M:0;e2eaa0f11f7e:36013 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/.lastflushedseqids 2024-12-02T03:43:07,180 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,180 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:07,183 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:59606 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59606 dst: /127.0.0.1:46389 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:07,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-02T03:43:07,187 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:07,187 INFO [M:0;e2eaa0f11f7e:36013 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T03:43:07,187 INFO [M:0;e2eaa0f11f7e:36013 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T03:43:07,187 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T03:43:07,187 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:07,187 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:07,187 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T03:43:07,187 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T03:43:07,187 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-02T03:43:07,205 DEBUG [M:0;e2eaa0f11f7e:36013 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb989815ec114a2f841e9d707d567d68 is 82, key is hbase:meta,,1/info:regioninfo/1733110984985/Put/seqid=0 2024-12-02T03:43:07,206 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,207 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,209 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:59624 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59624 dst: /127.0.0.1:46389 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:07,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-02T03:43:07,213 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-02T03:43:07,214 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb989815ec114a2f841e9d707d567d68 2024-12-02T03:43:07,234 INFO [RS:2;e2eaa0f11f7e:33293 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:07,234 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,234 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33293-0x101956c759f0003, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,234 INFO [RS:2;e2eaa0f11f7e:33293 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,33293,1733110982974; zookeeper connection closed. 2024-12-02T03:43:07,234 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4ed9d353 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4ed9d353 2024-12-02T03:43:07,240 DEBUG [M:0;e2eaa0f11f7e:36013 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58695728bb714cd3a720d7d7821bca55 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733110986174/Put/seqid=0 2024-12-02T03:43:07,242 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,242 INFO [RS:1;e2eaa0f11f7e:40787 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:07,242 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40787-0x101956c759f0002, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,242 INFO [RS:1;e2eaa0f11f7e:40787 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,40787,1733110982894; zookeeper connection closed. 2024-12-02T03:43:07,243 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ec04a0a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ec04a0a 2024-12-02T03:43:07,243 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,243 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:07,243 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-02T03:43:07,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:59634 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:46389:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59634 dst: /127.0.0.1:46389 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:07,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-02T03:43:07,251 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-02T03:43:07,252 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58695728bb714cd3a720d7d7821bca55 2024-12-02T03:43:07,275 DEBUG [M:0;e2eaa0f11f7e:36013 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58450fdabbb04f0a9e05270e424c7c63 is 69, key is e2eaa0f11f7e,33091,1733110982783/rs:state/1733110984149/Put/seqid=0 2024-12-02T03:43:07,277 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-02T03:43:07,277 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-02T03:43:07,280 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-829934895_22 at /127.0.0.1:55498 [Receiving block BP-1468540406-172.17.0.2-1733110978283:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36705:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55498 dst: /127.0.0.1:36705 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T03:43:07,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-02T03:43:07,686 WARN [M:0;e2eaa0f11f7e:36013 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-02T03:43:07,687 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58450fdabbb04f0a9e05270e424c7c63 2024-12-02T03:43:07,703 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb989815ec114a2f841e9d707d567d68 as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb989815ec114a2f841e9d707d567d68 2024-12-02T03:43:07,712 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb989815ec114a2f841e9d707d567d68, entries=8, sequenceid=72, filesize=5.5 K 2024-12-02T03:43:07,713 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58695728bb714cd3a720d7d7821bca55 as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58695728bb714cd3a720d7d7821bca55 2024-12-02T03:43:07,721 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58695728bb714cd3a720d7d7821bca55, entries=8, sequenceid=72, filesize=6.3 K 2024-12-02T03:43:07,723 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/58450fdabbb04f0a9e05270e424c7c63 as hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/58450fdabbb04f0a9e05270e424c7c63 2024-12-02T03:43:07,730 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/58450fdabbb04f0a9e05270e424c7c63, entries=3, sequenceid=72, filesize=5.2 K 2024-12-02T03:43:07,731 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 544ms, sequenceid=72, compaction requested=false 2024-12-02T03:43:07,732 INFO [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T03:43:07,732 DEBUG [M:0;e2eaa0f11f7e:36013 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733110987187Disabling compacts and flushes for region at 1733110987187Disabling writes for close at 1733110987187Obtaining lock to block concurrent updates at 1733110987187Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733110987187Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733110987188 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733110987189 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733110987189Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733110987204 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733110987204Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733110987221 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733110987240 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733110987240Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733110987259 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733110987275 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733110987275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@651914ef: reopening flushed file at 1733110987702 (+427 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7738fae5: reopening flushed file at 1733110987712 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37ffda82: reopening flushed file at 1733110987722 (+10 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 544ms, sequenceid=72, compaction requested=false at 1733110987731 (+9 ms)Writing region close event to WAL at 1733110987732 (+1 ms)Closed at 1733110987732 2024-12-02T03:43:07,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36705 is added to blk_1073741825_1011 (size=32665) 2024-12-02T03:43:07,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37105 is added to blk_1073741825_1011 (size=32665) 2024-12-02T03:43:07,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46389 is added to blk_1073741825_1011 (size=32665) 2024-12-02T03:43:07,736 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:07,736 INFO [M:0;e2eaa0f11f7e:36013 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T03:43:07,736 INFO [M:0;e2eaa0f11f7e:36013 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36013 2024-12-02T03:43:07,737 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:07,879 INFO [M:0;e2eaa0f11f7e:36013 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:07,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36013-0x101956c759f0000, quorum=127.0.0.1:51411, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:07,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:07,890 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:07,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:07,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:07,891 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:07,893 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:07,893 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:07,893 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1468540406-172.17.0.2-1733110978283 (Datanode Uuid 173fb258-e83f-477d-9e09-7a28ff063424) service to localhost/127.0.0.1:40287 2024-12-02T03:43:07,893 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:07,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data5/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data6/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:07,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:07,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:07,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:07,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:07,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:07,900 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:07,900 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:07,900 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1468540406-172.17.0.2-1733110978283 (Datanode Uuid ace3bdc1-1fca-4a70-a64a-3486a129408c) service to localhost/127.0.0.1:40287 2024-12-02T03:43:07,900 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:07,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data3/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data4/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,901 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:07,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:07,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:07,903 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:07,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:07,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:07,905 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:07,905 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:07,905 WARN [BP-1468540406-172.17.0.2-1733110978283 heartbeating to localhost/127.0.0.1:40287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1468540406-172.17.0.2-1733110978283 (Datanode Uuid 4259dd88-2ed2-4ea2-8ebe-a37dcf978cb6) service to localhost/127.0.0.1:40287 2024-12-02T03:43:07,905 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:07,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data1/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,906 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/cluster_b4d8d50d-9b9e-d2b6-d808-9746eed5b7c6/data/data2/current/BP-1468540406-172.17.0.2-1733110978283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:07,906 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:07,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T03:43:07,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:07,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:07,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:07,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:07,925 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T03:43:07,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T03:43:07,956 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=92 (was 162), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=182 (was 172) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8042 (was 8323) 2024-12-02T03:43:07,962 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=92, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=182, ProcessCount=11, AvailableMemoryMB=8042 2024-12-02T03:43:07,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T03:43:07,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.log.dir so I do NOT create it in target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2a1b248-510b-86d1-13a4-5d0f02634deb/hadoop.tmp.dir so I do NOT create it in target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca, deleteOnExit=true 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/test.cache.data in system properties and HBase conf 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir in system properties and HBase conf 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T03:43:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T03:43:07,963 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/nfs.dump.dir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/java.io.tmpdir in system properties and HBase conf 2024-12-02T03:43:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T03:43:07,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T03:43:07,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T03:43:08,227 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:08,232 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:43:08,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:43:08,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:43:08,233 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T03:43:08,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:08,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:43:08,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:08,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ffa125c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/java.io.tmpdir/jetty-localhost-41657-hadoop-hdfs-3_4_1-tests_jar-_-any-2582211107201669533/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T03:43:08,328 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:41657} 2024-12-02T03:43:08,328 INFO [Time-limited test {}] server.Server(415): Started @11874ms 2024-12-02T03:43:08,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:08,538 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:43:08,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:43:08,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:43:08,540 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T03:43:08,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:43:08,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:08,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@700f39d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/java.io.tmpdir/jetty-localhost-40365-hadoop-hdfs-3_4_1-tests_jar-_-any-5450557086140820585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:08,632 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:40365} 2024-12-02T03:43:08,632 INFO [Time-limited test {}] server.Server(415): Started @12178ms 2024-12-02T03:43:08,633 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T03:43:08,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:08,667 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:43:08,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:43:08,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:43:08,668 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T03:43:08,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:43:08,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:08,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/java.io.tmpdir/jetty-localhost-34833-hadoop-hdfs-3_4_1-tests_jar-_-any-10714891785378491133/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:08,760 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:34833} 2024-12-02T03:43:08,760 INFO [Time-limited test {}] server.Server(415): Started @12306ms 2024-12-02T03:43:08,762 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T03:43:08,800 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T03:43:08,803 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T03:43:08,804 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T03:43:08,804 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T03:43:08,804 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T03:43:08,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,AVAILABLE} 2024-12-02T03:43:08,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T03:43:08,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e89cb0b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/java.io.tmpdir/jetty-localhost-43255-hadoop-hdfs-3_4_1-tests_jar-_-any-1179818503966706351/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:08,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:43255} 2024-12-02T03:43:08,895 INFO [Time-limited test {}] server.Server(415): Started @12441ms 2024-12-02T03:43:08,896 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T03:43:09,514 WARN [Thread-570 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data2/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,514 WARN [Thread-569 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data1/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,527 WARN [Thread-510 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T03:43:09,530 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda8af172b3ea35de with lease ID 0x60bcb552f442f65d: Processing first storage report for DS-9c3a758f-0041-4fe4-a122-a3de6a647719 from datanode DatanodeRegistration(127.0.0.1:39403, datanodeUuid=1be90b24-094d-4fd7-bdba-852cef2a1e5f, infoPort=32841, infoSecurePort=0, ipcPort=39621, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,530 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda8af172b3ea35de with lease ID 0x60bcb552f442f65d: from storage DS-9c3a758f-0041-4fe4-a122-a3de6a647719 node DatanodeRegistration(127.0.0.1:39403, datanodeUuid=1be90b24-094d-4fd7-bdba-852cef2a1e5f, infoPort=32841, infoSecurePort=0, ipcPort=39621, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,530 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda8af172b3ea35de with lease ID 0x60bcb552f442f65d: Processing first storage report for DS-80f346ce-f34e-4275-8c6c-bfeddb02cc0d from datanode DatanodeRegistration(127.0.0.1:39403, datanodeUuid=1be90b24-094d-4fd7-bdba-852cef2a1e5f, infoPort=32841, infoSecurePort=0, ipcPort=39621, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,531 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda8af172b3ea35de with lease ID 0x60bcb552f442f65d: from storage DS-80f346ce-f34e-4275-8c6c-bfeddb02cc0d node DatanodeRegistration(127.0.0.1:39403, datanodeUuid=1be90b24-094d-4fd7-bdba-852cef2a1e5f, infoPort=32841, infoSecurePort=0, ipcPort=39621, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,660 WARN [Thread-581 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data3/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,661 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data4/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,676 WARN [Thread-533 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T03:43:09,679 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x616a7bb1685946b0 with lease ID 0x60bcb552f442f65e: Processing first storage report for DS-e1a20289-c8d5-43b1-a830-9f78d311a5d2 from datanode DatanodeRegistration(127.0.0.1:40671, datanodeUuid=0304e183-0ba4-45f7-8857-2c1b05faab38, infoPort=38641, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,679 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x616a7bb1685946b0 with lease ID 0x60bcb552f442f65e: from storage DS-e1a20289-c8d5-43b1-a830-9f78d311a5d2 node DatanodeRegistration(127.0.0.1:40671, datanodeUuid=0304e183-0ba4-45f7-8857-2c1b05faab38, infoPort=38641, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,679 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x616a7bb1685946b0 with lease ID 0x60bcb552f442f65e: Processing first storage report for DS-849ed8dd-df9a-49ae-a090-2908d732f39e from datanode DatanodeRegistration(127.0.0.1:40671, datanodeUuid=0304e183-0ba4-45f7-8857-2c1b05faab38, infoPort=38641, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,679 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x616a7bb1685946b0 with lease ID 0x60bcb552f442f65e: from storage DS-849ed8dd-df9a-49ae-a090-2908d732f39e node DatanodeRegistration(127.0.0.1:40671, datanodeUuid=0304e183-0ba4-45f7-8857-2c1b05faab38, infoPort=38641, infoSecurePort=0, ipcPort=34067, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,751 WARN [Thread-592 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data5/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,751 WARN [Thread-593 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data6/current/BP-1681032769-172.17.0.2-1733110987988/current, will proceed with Du for space computation calculation, 2024-12-02T03:43:09,771 WARN [Thread-555 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T03:43:09,773 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3534785242ce0fe0 with lease ID 0x60bcb552f442f65f: Processing first storage report for DS-939501fb-c3ca-4c52-88ec-258ddcd21f0d from datanode DatanodeRegistration(127.0.0.1:40641, datanodeUuid=45bd36b6-b207-4167-9c9f-110c8311064a, infoPort=43485, infoSecurePort=0, ipcPort=39727, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3534785242ce0fe0 with lease ID 0x60bcb552f442f65f: from storage DS-939501fb-c3ca-4c52-88ec-258ddcd21f0d node DatanodeRegistration(127.0.0.1:40641, datanodeUuid=45bd36b6-b207-4167-9c9f-110c8311064a, infoPort=43485, infoSecurePort=0, ipcPort=39727, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3534785242ce0fe0 with lease ID 0x60bcb552f442f65f: Processing first storage report for DS-de3dd1fc-d2a4-4f15-8250-225d2fa96e48 from datanode DatanodeRegistration(127.0.0.1:40641, datanodeUuid=45bd36b6-b207-4167-9c9f-110c8311064a, infoPort=43485, infoSecurePort=0, ipcPort=39727, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988) 2024-12-02T03:43:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3534785242ce0fe0 with lease ID 0x60bcb552f442f65f: from storage DS-de3dd1fc-d2a4-4f15-8250-225d2fa96e48 node DatanodeRegistration(127.0.0.1:40641, datanodeUuid=45bd36b6-b207-4167-9c9f-110c8311064a, infoPort=43485, infoSecurePort=0, ipcPort=39727, storageInfo=lv=-57;cid=testClusterID;nsid=84317700;c=1733110987988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T03:43:09,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc 2024-12-02T03:43:09,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/zookeeper_0, clientPort=56492, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T03:43:09,845 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56492 2024-12-02T03:43:09,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:09,847 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741825_1001 (size=7) 2024-12-02T03:43:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741825_1001 (size=7) 2024-12-02T03:43:09,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741825_1001 (size=7) 2024-12-02T03:43:09,866 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 with version=8 2024-12-02T03:43:09,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40287/user/jenkins/test-data/a9ef76ae-2fe7-2dfd-f129-7ca3bf5bf777/hbase-staging 2024-12-02T03:43:09,868 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T03:43:09,868 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:09,869 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34035 2024-12-02T03:43:09,870 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34035 connecting to ZooKeeper ensemble=127.0.0.1:56492 2024-12-02T03:43:09,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340350x0, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:09,916 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34035-0x101956c96d80000 connected 2024-12-02T03:43:10,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,008 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:10,012 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2, hbase.cluster.distributed=false 2024-12-02T03:43:10,014 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:10,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34035 2024-12-02T03:43:10,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34035 2024-12-02T03:43:10,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34035 2024-12-02T03:43:10,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34035 2024-12-02T03:43:10,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34035 2024-12-02T03:43:10,032 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:10,032 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:10,033 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:10,034 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32859 2024-12-02T03:43:10,035 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32859 connecting to ZooKeeper ensemble=127.0.0.1:56492 2024-12-02T03:43:10,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328590x0, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:10,050 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32859-0x101956c96d80001 connected 2024-12-02T03:43:10,050 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:10,050 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:10,051 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:10,052 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:10,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:10,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32859 2024-12-02T03:43:10,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32859 2024-12-02T03:43:10,055 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32859 2024-12-02T03:43:10,055 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32859 2024-12-02T03:43:10,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32859 2024-12-02T03:43:10,074 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:10,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,074 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:10,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:10,075 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:10,075 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:10,075 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36583 2024-12-02T03:43:10,076 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36583 connecting to ZooKeeper ensemble=127.0.0.1:56492 2024-12-02T03:43:10,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365830x0, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:10,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:365830x0, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:10,091 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36583-0x101956c96d80002 connected 2024-12-02T03:43:10,092 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:10,092 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:10,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:10,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:10,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36583 2024-12-02T03:43:10,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36583 2024-12-02T03:43:10,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36583 2024-12-02T03:43:10,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36583 2024-12-02T03:43:10,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36583 2024-12-02T03:43:10,111 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2eaa0f11f7e:0 server-side Connection retries=45 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T03:43:10,111 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T03:43:10,112 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T03:43:10,112 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32983 2024-12-02T03:43:10,113 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32983 connecting to ZooKeeper ensemble=127.0.0.1:56492 2024-12-02T03:43:10,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329830x0, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T03:43:10,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329830x0, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:10,129 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32983-0x101956c96d80003 connected 2024-12-02T03:43:10,129 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T03:43:10,130 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T03:43:10,130 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T03:43:10,132 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T03:43:10,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32983 2024-12-02T03:43:10,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32983 2024-12-02T03:43:10,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32983 2024-12-02T03:43:10,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32983 2024-12-02T03:43:10,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32983 2024-12-02T03:43:10,147 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2eaa0f11f7e:34035 2024-12-02T03:43:10,148 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,158 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, 
quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,167 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T03:43:10,168 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2eaa0f11f7e,34035,1733110989868 from backup master directory 2024-12-02T03:43:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T03:43:10,179 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T03:43:10,179 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,185 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/hbase.id] with ID: 8c404df0-ffe5-408c-9e02-a4e67e9a1be2 2024-12-02T03:43:10,185 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/.tmp/hbase.id 2024-12-02T03:43:10,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741826_1002 (size=42) 2024-12-02T03:43:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741826_1002 (size=42) 2024-12-02T03:43:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741826_1002 (size=42) 2024-12-02T03:43:10,195 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/.tmp/hbase.id]:[hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/hbase.id] 2024-12-02T03:43:10,210 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T03:43:10,210 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T03:43:10,212 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-02T03:43:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741827_1003 (size=196) 2024-12-02T03:43:10,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741827_1003 (size=196) 2024-12-02T03:43:10,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741827_1003 (size=196) 2024-12-02T03:43:10,231 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T03:43:10,232 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T03:43:10,233 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T03:43:10,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is 
added to blk_1073741828_1004 (size=1189) 2024-12-02T03:43:10,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741828_1004 (size=1189) 2024-12-02T03:43:10,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741828_1004 (size=1189) 2024-12-02T03:43:10,246 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store 2024-12-02T03:43:10,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741829_1005 (size=34) 2024-12-02T03:43:10,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741829_1005 (size=34) 2024-12-02T03:43:10,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741829_1005 (size=34) 2024-12-02T03:43:10,254 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:10,255 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T03:43:10,255 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:10,255 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T03:43:10,255 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T03:43:10,255 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:10,255 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:10,255 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733110990255Disabling compacts and flushes for region at 1733110990255Disabling writes for close at 1733110990255Writing region close event to WAL at 1733110990255Closed at 1733110990255 2024-12-02T03:43:10,256 WARN [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/.initializing 2024-12-02T03:43:10,256 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/WALs/e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,259 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C34035%2C1733110989868, suffix=, logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/WALs/e2eaa0f11f7e,34035,1733110989868, archiveDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/oldWALs, maxLogs=10 2024-12-02T03:43:10,260 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2eaa0f11f7e%2C34035%2C1733110989868.1733110990260 2024-12-02T03:43:10,269 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/WALs/e2eaa0f11f7e,34035,1733110989868/e2eaa0f11f7e%2C34035%2C1733110989868.1733110990260 2024-12-02T03:43:10,270 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:38641:38641)] 2024-12-02T03:43:10,271 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:10,271 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:10,271 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,272 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T03:43:10,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:10,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T03:43:10,278 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:10,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T03:43:10,282 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:10,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T03:43:10,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:10,286 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,287 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,287 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,289 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,289 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,289 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T03:43:10,291 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T03:43:10,293 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:10,294 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70842319, jitterRate=0.0556328147649765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:10,294 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733110990272Initializing all the Stores at 1733110990273 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110990273Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110990273Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110990273Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110990273Cleaning up temporary data from old regions at 1733110990289 (+16 ms)Region opened successfully at 1733110990294 (+5 ms) 2024-12-02T03:43:10,295 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T03:43:10,298 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e8e01bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:10,299 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T03:43:10,300 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T03:43:10,300 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T03:43:10,300 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T03:43:10,300 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T03:43:10,301 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T03:43:10,301 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T03:43:10,303 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-02T03:43:10,304 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T03:43:10,316 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T03:43:10,316 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T03:43:10,317 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T03:43:10,328 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T03:43:10,329 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T03:43:10,330 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T03:43:10,341 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T03:43:10,343 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T03:43:10,353 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T03:43:10,356 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T03:43:10,366 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,375 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2eaa0f11f7e,34035,1733110989868, sessionid=0x101956c96d80000, setting cluster-up flag (Was=false) 2024-12-02T03:43:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,429 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T03:43:10,432 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,443 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T03:43:10,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T03:43:10,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,478 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T03:43:10,480 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:10,481 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T03:43:10,484 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:10,484 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T03:43:10,484 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T03:43:10,485 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2eaa0f11f7e,34035,1733110989868 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T03:43:10,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:10,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=5, maxPoolSize=5 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2eaa0f11f7e:0, corePoolSize=10, maxPoolSize=10 2024-12-02T03:43:10,486 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,487 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:10,487 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,488 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733111020488 2024-12-02T03:43:10,488 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T03:43:10,488 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T03:43:10,489 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] 
hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,489 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T03:43:10,489 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T03:43:10,490 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T03:43:10,490 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T03:43:10,490 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110990490,5,FailOnTimeoutGroup] 2024-12-02T03:43:10,491 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110990490,5,FailOnTimeoutGroup] 2024-12-02T03:43:10,491 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,491 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,491 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T03:43:10,491 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,491 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:10,491 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T03:43:10,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741831_1007 (size=1321) 2024-12-02T03:43:10,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741831_1007 (size=1321) 2024-12-02T03:43:10,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741831_1007 (size=1321) 2024-12-02T03:43:10,506 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T03:43:10,506 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 2024-12-02T03:43:10,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741832_1008 (size=32) 2024-12-02T03:43:10,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741832_1008 (size=32) 2024-12-02T03:43:10,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741832_1008 (size=32) 2024-12-02T03:43:10,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:10,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T03:43:10,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T03:43:10,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:10,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T03:43:10,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T03:43:10,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:10,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T03:43:10,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T03:43:10,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:10,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T03:43:10,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName table 2024-12-02T03:43:10,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:10,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:10,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T03:43:10,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740 2024-12-02T03:43:10,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740 2024-12-02T03:43:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T03:43:10,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T03:43:10,531 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T03:43:10,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T03:43:10,535 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:10,535 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73544833, jitterRate=0.0959034115076065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:10,536 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(746): ClusterId : 8c404df0-ffe5-408c-9e02-a4e67e9a1be2 2024-12-02T03:43:10,536 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:10,536 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(746): ClusterId : 8c404df0-ffe5-408c-9e02-a4e67e9a1be2 2024-12-02T03:43:10,536 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733110990516Initializing all the Stores at 1733110990518 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110990518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110990518Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110990518Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110990518Cleaning up temporary data from old regions at 1733110990530 (+12 ms)Region opened successfully at 1733110990537 (+7 ms) 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T03:43:10,537 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T03:43:10,537 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(746): ClusterId : 8c404df0-ffe5-408c-9e02-a4e67e9a1be2 2024-12-02T03:43:10,537 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T03:43:10,537 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:10,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733110990537Disabling compacts and flushes for region at 1733110990537Disabling writes for close at 1733110990537Writing region close event to WAL at 1733110990537Closed at 1733110990537 2024-12-02T03:43:10,539 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:10,539 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T03:43:10,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T03:43:10,541 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T03:43:10,543 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; 
state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T03:43:10,575 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:10,575 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:10,575 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:10,575 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T03:43:10,575 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:10,575 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T03:43:10,595 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:10,595 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:10,595 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T03:43:10,596 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c37a6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:10,596 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2011674c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:10,596 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@439a1743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2eaa0f11f7e/172.17.0.2:0 2024-12-02T03:43:10,606 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2eaa0f11f7e:32859 2024-12-02T03:43:10,606 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e2eaa0f11f7e:36583 2024-12-02T03:43:10,606 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:10,606 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:10,606 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:10,606 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T03:43:10,606 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:10,606 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T03:43:10,607 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34035,1733110989868 with port=32859, startcode=1733110990032 2024-12-02T03:43:10,607 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34035,1733110989868 with port=36583, startcode=1733110990074 2024-12-02T03:43:10,607 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:10,607 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:10,610 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51599, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:10,610 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49135, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:10,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,611 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;e2eaa0f11f7e:32983 2024-12-02T03:43:10,612 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T03:43:10,612 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T03:43:10,612 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T03:43:10,613 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2eaa0f11f7e,34035,1733110989868 with port=32983, startcode=1733110990111 2024-12-02T03:43:10,613 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T03:43:10,613 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,613 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,613 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 2024-12-02T03:43:10,613 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40451 2024-12-02T03:43:10,613 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:10,615 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44077, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T03:43:10,615 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 2024-12-02T03:43:10,615 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40451 2024-12-02T03:43:10,615 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:10,616 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,616 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34035 {}] master.ServerManager(517): Registering regionserver=e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,618 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 2024-12-02T03:43:10,618 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40451 2024-12-02T03:43:10,618 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T03:43:10,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:10,657 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] zookeeper.ZKUtil(111): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,657 WARN [RS:1;e2eaa0f11f7e:36583 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T03:43:10,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,32859,1733110990032] 2024-12-02T03:43:10,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,36583,1733110990074] 2024-12-02T03:43:10,657 INFO [RS:1;e2eaa0f11f7e:36583 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T03:43:10,657 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] zookeeper.ZKUtil(111): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,657 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] zookeeper.ZKUtil(111): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,657 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2eaa0f11f7e,32983,1733110990111] 2024-12-02T03:43:10,657 WARN [RS:2;e2eaa0f11f7e:32983 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T03:43:10,657 WARN [RS:0;e2eaa0f11f7e:32859 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T03:43:10,657 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,657 INFO [RS:0;e2eaa0f11f7e:32859 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T03:43:10,657 INFO [RS:2;e2eaa0f11f7e:32983 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T03:43:10,658 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,658 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,663 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:10,663 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:10,663 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T03:43:10,666 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:10,678 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:10,678 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T03:43:10,678 INFO 
[RS:2;e2eaa0f11f7e:32983 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:10,678 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,678 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:10,678 INFO [RS:0;e2eaa0f11f7e:32859 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:10,678 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,679 INFO [RS:1;e2eaa0f11f7e:36583 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T03:43:10,679 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,679 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:10,679 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T03:43:10,680 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:10,680 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:10,680 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T03:43:10,680 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,680 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,680 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:10,680 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,680 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,680 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,680 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, 
corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,681 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,681 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,681 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,681 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=2, maxPoolSize=2 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting 
executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2eaa0f11f7e:0, corePoolSize=1, maxPoolSize=1 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,682 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0, corePoolSize=3, maxPoolSize=3 2024-12-02T03:43:10,686 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,32983,1733110990111-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:10,687 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,32859,1733110990032-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,688 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36583,1733110990074-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:10,693 WARN [e2eaa0f11f7e:34035 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T03:43:10,702 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:10,702 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,36583,1733110990074-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,702 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,702 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.Replication(171): e2eaa0f11f7e,36583,1733110990074 started 2024-12-02T03:43:10,708 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:10,708 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T03:43:10,708 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,32983,1733110990111-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,708 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,32859,1733110990032-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,709 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,709 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,709 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.Replication(171): e2eaa0f11f7e,32983,1733110990111 started 2024-12-02T03:43:10,709 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.Replication(171): e2eaa0f11f7e,32859,1733110990032 started 2024-12-02T03:43:10,715 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:10,715 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,36583,1733110990074, RpcServer on e2eaa0f11f7e/172.17.0.2:36583, sessionid=0x101956c96d80002 2024-12-02T03:43:10,716 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:10,716 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,716 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,36583,1733110990074' 2024-12-02T03:43:10,716 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,36583,1733110990074' 2024-12-02T03:43:10,717 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:10,718 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:10,718 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:10,718 INFO [RS:1;e2eaa0f11f7e:36583 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:10,718 INFO [RS:1;e2eaa0f11f7e:36583 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T03:43:10,726 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:10,726 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:10,726 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,32983,1733110990111, RpcServer on e2eaa0f11f7e/172.17.0.2:32983, sessionid=0x101956c96d80003 2024-12-02T03:43:10,726 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1482): Serving as e2eaa0f11f7e,32859,1733110990032, RpcServer on e2eaa0f11f7e/172.17.0.2:32859, sessionid=0x101956c96d80001 2024-12-02T03:43:10,727 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:10,727 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T03:43:10,727 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,727 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,727 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,32983,1733110990111' 2024-12-02T03:43:10,727 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,32859,1733110990032' 2024-12-02T03:43:10,727 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:10,727 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T03:43:10,727 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:10,727 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,32983,1733110990111' 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2eaa0f11f7e,32859,1733110990032' 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T03:43:10,728 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:10,728 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T03:43:10,729 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:10,729 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T03:43:10,729 INFO [RS:0;e2eaa0f11f7e:32859 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:10,729 INFO [RS:2;e2eaa0f11f7e:32983 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T03:43:10,729 INFO [RS:0;e2eaa0f11f7e:32859 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T03:43:10,729 INFO [RS:2;e2eaa0f11f7e:32983 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T03:43:10,823 INFO [RS:1;e2eaa0f11f7e:36583 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C36583%2C1733110990074, suffix=, logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,36583,1733110990074, archiveDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs, maxLogs=32 2024-12-02T03:43:10,826 INFO [RS:1;e2eaa0f11f7e:36583 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2eaa0f11f7e%2C36583%2C1733110990074.1733110990825 2024-12-02T03:43:10,831 INFO [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C32859%2C1733110990032, suffix=, logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32859,1733110990032, archiveDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs, maxLogs=32 2024-12-02T03:43:10,831 INFO [RS:2;e2eaa0f11f7e:32983 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C32983%2C1733110990111, suffix=, logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32983,1733110990111, archiveDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs, maxLogs=32 2024-12-02T03:43:10,833 INFO [RS:2;e2eaa0f11f7e:32983 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2eaa0f11f7e%2C32983%2C1733110990111.1733110990832 2024-12-02T03:43:10,833 INFO [RS:0;e2eaa0f11f7e:32859 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2eaa0f11f7e%2C32859%2C1733110990032.1733110990832 2024-12-02T03:43:10,836 INFO [RS:1;e2eaa0f11f7e:36583 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,36583,1733110990074/e2eaa0f11f7e%2C36583%2C1733110990074.1733110990825 2024-12-02T03:43:10,839 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:38641:38641)] 2024-12-02T03:43:10,849 INFO [RS:0;e2eaa0f11f7e:32859 
{}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32859,1733110990032/e2eaa0f11f7e%2C32859%2C1733110990032.1733110990832 2024-12-02T03:43:10,849 INFO [RS:2;e2eaa0f11f7e:32983 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32983,1733110990111/e2eaa0f11f7e%2C32983%2C1733110990111.1733110990832 2024-12-02T03:43:10,852 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38641:38641),(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:43485:43485)] 2024-12-02T03:43:10,852 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:38641:38641)] 2024-12-02T03:43:10,944 DEBUG [e2eaa0f11f7e:34035 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T03:43:10,944 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T03:43:10,950 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T03:43:10,950 INFO [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T03:43:10,951 INFO [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T03:43:10,951 INFO [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T03:43:10,951 DEBUG [e2eaa0f11f7e:34035 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T03:43:10,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:10,953 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,32859,1733110990032, state=OPENING 2024-12-02T03:43:10,970 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T03:43:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:10,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:10,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:10,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:10,979 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T03:43:10,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:10,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,32859,1733110990032}] 2024-12-02T03:43:11,137 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T03:43:11,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T03:43:11,146 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T03:43:11,147 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T03:43:11,150 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2eaa0f11f7e%2C32859%2C1733110990032.meta, suffix=.meta, logDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32859,1733110990032, archiveDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs, maxLogs=32 2024-12-02T03:43:11,152 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2eaa0f11f7e%2C32859%2C1733110990032.meta.1733110991151.meta 2024-12-02T03:43:11,162 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/WALs/e2eaa0f11f7e,32859,1733110990032/e2eaa0f11f7e%2C32859%2C1733110990032.meta.1733110991151.meta 2024-12-02T03:43:11,163 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:43485:43485),(127.0.0.1/127.0.0.1:32841:32841),(127.0.0.1/127.0.0.1:38641:38641)] 2024-12-02T03:43:11,164 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:11,165 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T03:43:11,165 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T03:43:11,165 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T03:43:11,165 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T03:43:11,165 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:11,166 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T03:43:11,166 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T03:43:11,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T03:43:11,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T03:43:11,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:11,170 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T03:43:11,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T03:43:11,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:11,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T03:43:11,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T03:43:11,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:11,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T03:43:11,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T03:43:11,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T03:43:11,176 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T03:43:11,177 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740 2024-12-02T03:43:11,179 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740 2024-12-02T03:43:11,180 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T03:43:11,181 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T03:43:11,181 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-02T03:43:11,184 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T03:43:11,185 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65618649, jitterRate=-0.022205933928489685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T03:43:11,185 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T03:43:11,186 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733110991166Writing region info on filesystem at 1733110991166Initializing all the Stores at 1733110991167 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110991167Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110991167Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110991167Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733110991167Cleaning up temporary data from old regions at 1733110991181 (+14 ms)Running coprocessor post-open hooks at 1733110991185 (+4 ms)Region opened successfully at 1733110991186 (+1 ms) 2024-12-02T03:43:11,187 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733110991136 2024-12-02T03:43:11,190 DEBUG [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T03:43:11,191 INFO [RS_OPEN_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T03:43:11,192 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:11,193 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2eaa0f11f7e,32859,1733110990032, state=OPEN 2024-12-02T03:43:11,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:11,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:11,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:11,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T03:43:11,220 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:11,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:11,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:11,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:11,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T03:43:11,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T03:43:11,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2eaa0f11f7e,32859,1733110990032 in 241 msec 2024-12-02T03:43:11,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T03:43:11,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 685 msec 2024-12-02T03:43:11,229 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T03:43:11,229 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T03:43:11,231 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T03:43:11,231 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=e2eaa0f11f7e,32859,1733110990032, seqNum=-1] 2024-12-02T03:43:11,231 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T03:43:11,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T03:43:11,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 756 msec 2024-12-02T03:43:11,241 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733110991241, completionTime=-1 2024-12-02T03:43:11,241 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T03:43:11,241 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T03:43:11,243 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T03:43:11,243 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733111051243 2024-12-02T03:43:11,243 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733111111243 2024-12-02T03:43:11,243 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2eaa0f11f7e:34035, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T03:43:11,244 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:11,246 DEBUG [master/e2eaa0f11f7e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T03:43:11,248 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.069sec 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T03:43:11,249 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T03:43:11,252 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T03:43:11,252 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T03:43:11,252 INFO [master/e2eaa0f11f7e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2eaa0f11f7e,34035,1733110989868-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T03:43:11,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef9cf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:11,339 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2eaa0f11f7e,34035,-1 for getting cluster id 2024-12-02T03:43:11,339 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T03:43:11,342 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8c404df0-ffe5-408c-9e02-a4e67e9a1be2' 2024-12-02T03:43:11,343 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T03:43:11,344 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8c404df0-ffe5-408c-9e02-a4e67e9a1be2" 2024-12-02T03:43:11,344 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cb41bea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:11,345 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2eaa0f11f7e,34035,-1] 2024-12-02T03:43:11,345 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T03:43:11,346 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:11,349 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T03:43:11,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@98aa2ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T03:43:11,352 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T03:43:11,354 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2eaa0f11f7e,32859,1733110990032, seqNum=-1] 2024-12-02T03:43:11,355 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T03:43:11,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T03:43:11,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:11,360 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T03:43:11,361 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:11,361 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75a8e33b 2024-12-02T03:43:11,362 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T03:43:11,363 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T03:43:11,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T03:43:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-02T03:43:11,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T03:43:11,368 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-02T03:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:11,370 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T03:43:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741837_1013 (size=392) 2024-12-02T03:43:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741837_1013 (size=392) 2024-12-02T03:43:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741837_1013 (size=392) 2024-12-02T03:43:11,383 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f178637b0593ab5690aa023efa6a7dde, NAME => 'TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2 2024-12-02T03:43:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741838_1014 (size=51) 2024-12-02T03:43:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741838_1014 (size=51) 2024-12-02T03:43:11,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741838_1014 (size=51) 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing f178637b0593ab5690aa023efa6a7dde, disabling compactions & flushes 2024-12-02T03:43:11,392 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. after waiting 0 ms 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,392 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,392 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for f178637b0593ab5690aa023efa6a7dde: Waiting for close lock at 1733110991392Disabling compacts and flushes for region at 1733110991392Disabling writes for close at 1733110991392Writing region close event to WAL at 1733110991392Closed at 1733110991392 2024-12-02T03:43:11,394 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T03:43:11,394 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733110991394"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733110991394"}]},"ts":"1733110991394"} 2024-12-02T03:43:11,397 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T03:43:11,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T03:43:11,399 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733110991399"}]},"ts":"1733110991399"} 2024-12-02T03:43:11,401 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-02T03:43:11,402 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {e2eaa0f11f7e=0} racks are {/default-rack=0} 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T03:43:11,403 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T03:43:11,403 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T03:43:11,403 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T03:43:11,403 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T03:43:11,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f178637b0593ab5690aa023efa6a7dde, ASSIGN}] 2024-12-02T03:43:11,405 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f178637b0593ab5690aa023efa6a7dde, ASSIGN 2024-12-02T03:43:11,406 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f178637b0593ab5690aa023efa6a7dde, ASSIGN; state=OFFLINE, location=e2eaa0f11f7e,32859,1733110990032; forceNewPlan=false, retain=false 2024-12-02T03:43:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:11,557 INFO [e2eaa0f11f7e:34035 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-02T03:43:11,558 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f178637b0593ab5690aa023efa6a7dde, regionState=OPENING, regionLocation=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:11,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f178637b0593ab5690aa023efa6a7dde, ASSIGN because future has completed 2024-12-02T03:43:11,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f178637b0593ab5690aa023efa6a7dde, server=e2eaa0f11f7e,32859,1733110990032}] 2024-12-02T03:43:11,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:11,725 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,726 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f178637b0593ab5690aa023efa6a7dde, NAME => 'TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.', STARTKEY => '', ENDKEY => ''} 2024-12-02T03:43:11,726 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,726 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T03:43:11,727 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,727 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,729 INFO [StoreOpener-f178637b0593ab5690aa023efa6a7dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,731 INFO [StoreOpener-f178637b0593ab5690aa023efa6a7dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f178637b0593ab5690aa023efa6a7dde columnFamilyName cf 2024-12-02T03:43:11,731 DEBUG [StoreOpener-f178637b0593ab5690aa023efa6a7dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T03:43:11,732 INFO [StoreOpener-f178637b0593ab5690aa023efa6a7dde-1 {}] regionserver.HStore(327): Store=f178637b0593ab5690aa023efa6a7dde/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T03:43:11,732 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,733 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,734 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,735 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,735 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,738 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,742 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T03:43:11,743 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f178637b0593ab5690aa023efa6a7dde; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67193272, jitterRate=0.001257777214050293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T03:43:11,744 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:11,746 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f178637b0593ab5690aa023efa6a7dde: Running coprocessor pre-open hook at 1733110991727Writing region info on filesystem at 1733110991727Initializing all the Stores at 1733110991728 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733110991728Cleaning up temporary data from old regions at 1733110991735 (+7 ms)Running coprocessor post-open hooks at 1733110991744 (+9 ms)Region opened successfully at 1733110991745 (+1 ms) 2024-12-02T03:43:11,747 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde., pid=6, masterSystemTime=1733110991719 2024-12-02T03:43:11,751 DEBUG [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,752 INFO [RS_OPEN_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:11,753 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f178637b0593ab5690aa023efa6a7dde, regionState=OPEN, openSeqNum=2, regionLocation=e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:11,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f178637b0593ab5690aa023efa6a7dde, server=e2eaa0f11f7e,32859,1733110990032 because future has completed 2024-12-02T03:43:11,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T03:43:11,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f178637b0593ab5690aa023efa6a7dde, server=e2eaa0f11f7e,32859,1733110990032 in 195 msec 2024-12-02T03:43:11,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T03:43:11,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=f178637b0593ab5690aa023efa6a7dde, ASSIGN in 360 msec 2024-12-02T03:43:11,768 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T03:43:11,768 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733110991768"}]},"ts":"1733110991768"} 2024-12-02T03:43:11,772 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-02T03:43:11,774 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T03:43:11,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 409 msec 
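[Editor's sketch] The CreateTableProcedure trace above (pid=4 with sub-procedures pid=5 and pid=6) is the server-side counterpart of a single client create-table request for 'TestHBaseWalOnEC' with one column family 'cf' and REGION_REPLICATION => '1'. A minimal sketch of such a request using the public HBase client API follows; the class name and the 'connection' parameter are illustrative, not taken from this log, while the Admin/TableDescriptorBuilder calls are the standard client API.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
        // Issues the same kind of create-table request that the master records above as pid=4.
        static void createTestTable(Connection connection) throws IOException {
            try (Admin admin = connection.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                    .setRegionReplication(1)                                  // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // single family 'cf', all other attributes left at defaults
                    .build());
            }
        }
    }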
2024-12-02T03:43:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T03:43:11,999 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-02T03:43:11,999 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-02T03:43:12,000 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T03:43:12,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-02T03:43:12,004 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T03:43:12,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-02T03:43:12,008 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde., hostname=e2eaa0f11f7e,32859,1733110990032, seqNum=2] 2024-12-02T03:43:12,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-02T03:43:12,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-02T03:43:12,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:12,016 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-02T03:43:12,017 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-02T03:43:12,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T03:43:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:12,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32859 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-02T03:43:12,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 
2024-12-02T03:43:12,173 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing f178637b0593ab5690aa023efa6a7dde 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-02T03:43:12,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/.tmp/cf/b9f18f856c544fe9bf4dc5d40537704e is 36, key is row/cf:cq/1733110992010/Put/seqid=0 2024-12-02T03:43:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741839_1015 (size=4787) 2024-12-02T03:43:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741839_1015 (size=4787) 2024-12-02T03:43:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741839_1015 (size=4787) 2024-12-02T03:43:12,209 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/.tmp/cf/b9f18f856c544fe9bf4dc5d40537704e 2024-12-02T03:43:12,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/.tmp/cf/b9f18f856c544fe9bf4dc5d40537704e as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/cf/b9f18f856c544fe9bf4dc5d40537704e 2024-12-02T03:43:12,224 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/cf/b9f18f856c544fe9bf4dc5d40537704e, entries=1, sequenceid=5, filesize=4.7 K 2024-12-02T03:43:12,226 INFO [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for f178637b0593ab5690aa023efa6a7dde in 53ms, sequenceid=5, compaction requested=false 2024-12-02T03:43:12,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for f178637b0593ab5690aa023efa6a7dde: 2024-12-02T03:43:12,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 
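[Editor's sketch] The flush trace above (FlushTableProcedure pid=7 and FlushRegionProcedure pid=8) shows a single 32 B cell at row/cf:cq being moved from the memstore into an HFile. On the client side that corresponds to one Put followed by an admin-requested flush of the table; a minimal sketch with the standard Table/Admin API is below. The class name, the 'connection' parameter, and the cell value are illustrative; the test's actual payload is not visible in this log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
        // Writes one cell at row/cf:cq (as in the memstore flush above), then forces it out to an HFile.
        static void putAndFlush(Connection connection) throws IOException {
            TableName name = TableName.valueOf("TestHBaseWalOnEC");
            try (Table table = connection.getTable(name);
                 Admin admin = connection.getAdmin()) {
                table.put(new Put(Bytes.toBytes("row"))
                    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"))); // value is illustrative
                admin.flush(name); // drives the FlushTableProcedure / FlushRegionProcedure logged above
            }
        }
    }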
2024-12-02T03:43:12,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2eaa0f11f7e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-02T03:43:12,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-02T03:43:12,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T03:43:12,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 212 msec 2024-12-02T03:43:12,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 222 msec 2024-12-02T03:43:12,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T03:43:12,329 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-02T03:43:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T03:43:12,333 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T03:43:12,333 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:12,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,333 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T03:43:12,334 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T03:43:12,334 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=432711583, stopped=false 2024-12-02T03:43:12,334 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2eaa0f11f7e,34035,1733110989868 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, 
quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:12,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:12,384 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T03:43:12,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:12,384 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T03:43:12,385 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:12,386 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:12,386 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:12,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:12,387 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,32859,1733110990032' ***** 2024-12-02T03:43:12,387 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:12,387 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,36583,1733110990074' ***** 2024-12-02T03:43:12,388 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:12,388 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T03:43:12,388 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2eaa0f11f7e,32983,1733110990111' ***** 2024-12-02T03:43:12,388 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T03:43:12,388 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:12,389 INFO [RS:0;e2eaa0f11f7e:32859 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:12,389 INFO [RS:0;e2eaa0f11f7e:32859 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-02T03:43:12,389 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:12,389 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(3091): Received CLOSE for f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:12,389 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:12,389 INFO [RS:1;e2eaa0f11f7e:36583 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:12,389 INFO [RS:1;e2eaa0f11f7e:36583 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T03:43:12,390 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:12,390 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,390 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T03:43:12,390 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T03:43:12,390 INFO [RS:1;e2eaa0f11f7e:36583 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e2eaa0f11f7e:36583. 2024-12-02T03:43:12,390 INFO [regionserver/e2eaa0f11f7e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:12,390 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:12,390 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T03:43:12,390 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at 
org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;e2eaa0f11f7e:32983. 2024-12-02T03:43:12,390 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(959): stopping server e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:12,390 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,390 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:12,390 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:12,390 INFO [RS:0;e2eaa0f11f7e:32859 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2eaa0f11f7e:32859. 2024-12-02T03:43:12,390 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,36583,1733110990074; all regions closed. 2024-12-02T03:43:12,390 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,390 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f178637b0593ab5690aa023efa6a7dde, disabling compactions & flushes 2024-12-02T03:43:12,390 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,32983,1733110990111; all regions closed. 2024-12-02T03:43:12,390 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 
2024-12-02T03:43:12,390 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T03:43:12,390 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,390 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:12,390 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. after waiting 0 ms 2024-12-02T03:43:12,390 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:12,390 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:12,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:12,391 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T03:43:12,391 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T03:43:12,391 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,391 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,392 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T03:43:12,392 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f178637b0593ab5690aa023efa6a7dde=TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde.} 2024-12-02T03:43:12,392 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T03:43:12,392 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f178637b0593ab5690aa023efa6a7dde 2024-12-02T03:43:12,392 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T03:43:12,392 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T03:43:12,392 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T03:43:12,392 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T03:43:12,392 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-02T03:43:12,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741835_1011 (size=93) 2024-12-02T03:43:12,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741835_1011 (size=93) 2024-12-02T03:43:12,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741835_1011 (size=93) 2024-12-02T03:43:12,398 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs 2024-12-02T03:43:12,398 INFO [RS:2;e2eaa0f11f7e:32983 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2eaa0f11f7e%2C32983%2C1733110990111:(num 1733110990832) 2024-12-02T03:43:12,398 DEBUG [RS:2;e2eaa0f11f7e:32983 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,398 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,398 INFO [RS:2;e2eaa0f11f7e:32983 
{}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:12,399 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:12,399 INFO [RS:2;e2eaa0f11f7e:32983 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32983 2024-12-02T03:43:12,400 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/default/TestHBaseWalOnEC/f178637b0593ab5690aa023efa6a7dde/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T03:43:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741833_1009 (size=93) 2024-12-02T03:43:12,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741833_1009 (size=93) 2024-12-02T03:43:12,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741833_1009 (size=93) 2024-12-02T03:43:12,401 INFO [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 2024-12-02T03:43:12,401 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f178637b0593ab5690aa023efa6a7dde: Waiting for close lock at 1733110992390Running coprocessor pre-close hooks at 1733110992390Disabling compacts and flushes for region at 1733110992390Disabling writes for close at 1733110992390Writing region close event to WAL at 1733110992392 (+2 ms)Running coprocessor post-close hooks at 1733110992401 (+9 ms)Closed at 1733110992401 2024-12-02T03:43:12,401 DEBUG [RS_CLOSE_REGION-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde. 
2024-12-02T03:43:12,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:12,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,32983,1733110990111 2024-12-02T03:43:12,411 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:12,412 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/info/9f0278f1bc274b0e9fcb8961cad58b1d is 153, key is TestHBaseWalOnEC,,1733110991364.f178637b0593ab5690aa023efa6a7dde./info:regioninfo/1733110991753/Put/seqid=0 2024-12-02T03:43:12,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741840_1016 (size=6637) 2024-12-02T03:43:12,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741840_1016 (size=6637) 2024-12-02T03:43:12,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741840_1016 (size=6637) 2024-12-02T03:43:12,419 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/info/9f0278f1bc274b0e9fcb8961cad58b1d 2024-12-02T03:43:12,419 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,32983,1733110990111] 2024-12-02T03:43:12,423 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T03:43:12,424 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T03:43:12,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T03:43:12,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T03:43:12,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T03:43:12,428 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T03:43:12,428 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node 
/hbase/draining/e2eaa0f11f7e,32983,1733110990111 already deleted, retry=false 2024-12-02T03:43:12,428 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,32983,1733110990111 expired; onlineServers=2 2024-12-02T03:43:12,444 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/ns/47c1756f818948d9b9fdf754f9dacd72 is 43, key is default/ns:d/1733110991233/Put/seqid=0 2024-12-02T03:43:12,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741841_1017 (size=5153) 2024-12-02T03:43:12,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741841_1017 (size=5153) 2024-12-02T03:43:12,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741841_1017 (size=5153) 2024-12-02T03:43:12,451 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/ns/47c1756f818948d9b9fdf754f9dacd72 2024-12-02T03:43:12,472 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/table/778d1ab63de74d3ca24b3f4179b0a232 is 52, key is TestHBaseWalOnEC/table:state/1733110991768/Put/seqid=0 2024-12-02T03:43:12,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741842_1018 (size=5249) 2024-12-02T03:43:12,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741842_1018 (size=5249) 2024-12-02T03:43:12,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741842_1018 (size=5249) 2024-12-02T03:43:12,480 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/table/778d1ab63de74d3ca24b3f4179b0a232 2024-12-02T03:43:12,489 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/info/9f0278f1bc274b0e9fcb8961cad58b1d as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/info/9f0278f1bc274b0e9fcb8961cad58b1d 2024-12-02T03:43:12,497 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/info/9f0278f1bc274b0e9fcb8961cad58b1d, entries=10, 
sequenceid=11, filesize=6.5 K 2024-12-02T03:43:12,498 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/ns/47c1756f818948d9b9fdf754f9dacd72 as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/ns/47c1756f818948d9b9fdf754f9dacd72 2024-12-02T03:43:12,506 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/ns/47c1756f818948d9b9fdf754f9dacd72, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T03:43:12,507 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/.tmp/table/778d1ab63de74d3ca24b3f4179b0a232 as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/table/778d1ab63de74d3ca24b3f4179b0a232 2024-12-02T03:43:12,516 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/table/778d1ab63de74d3ca24b3f4179b0a232, entries=2, sequenceid=11, filesize=5.1 K 2024-12-02T03:43:12,517 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-12-02T03:43:12,520 INFO [RS:2;e2eaa0f11f7e:32983 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:12,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,520 INFO [RS:2;e2eaa0f11f7e:32983 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,32983,1733110990111; zookeeper connection closed. 
2024-12-02T03:43:12,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32983-0x101956c96d80003, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,520 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a72ddb7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a72ddb7 2024-12-02T03:43:12,524 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T03:43:12,525 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T03:43:12,525 INFO [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:12,525 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733110992392Running coprocessor pre-close hooks at 1733110992392Disabling compacts and flushes for region at 1733110992392Disabling writes for close at 1733110992392Obtaining lock to block concurrent updates at 1733110992392Preparing flush snapshotting stores in 1588230740 at 1733110992392Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733110992392Flushing stores of hbase:meta,,1.1588230740 at 1733110992393 (+1 ms)Flushing 1588230740/info: creating writer at 1733110992393Flushing 1588230740/info: appending metadata at 1733110992411 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733110992411Flushing 1588230740/ns: creating writer at 1733110992426 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733110992443 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733110992443Flushing 1588230740/table: creating writer at 1733110992458 (+15 ms)Flushing 1588230740/table: appending metadata at 1733110992472 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733110992472Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34aa27c4: reopening flushed file at 1733110992487 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@696de4bf: reopening flushed file at 1733110992497 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@450f6957: reopening flushed file at 1733110992506 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false at 1733110992517 (+11 ms)Writing region close event to WAL at 1733110992519 (+2 ms)Running coprocessor post-close hooks at 1733110992525 (+6 ms)Closed at 1733110992525 2024-12-02T03:43:12,526 DEBUG [RS_CLOSE_META-regionserver/e2eaa0f11f7e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T03:43:12,592 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(976): stopping server e2eaa0f11f7e,32859,1733110990032; all regions closed. 
2024-12-02T03:43:12,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741836_1012 (size=2751) 2024-12-02T03:43:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741836_1012 (size=2751) 2024-12-02T03:43:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741836_1012 (size=2751) 2024-12-02T03:43:12,600 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs 2024-12-02T03:43:12,600 INFO [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2eaa0f11f7e%2C32859%2C1733110990032.meta:.meta(num 1733110991151) 2024-12-02T03:43:12,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:12,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741834_1010 (size=1298) 2024-12-02T03:43:12,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741834_1010 (size=1298) 2024-12-02T03:43:12,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741834_1010 (size=1298) 2024-12-02T03:43:12,609 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs 2024-12-02T03:43:12,609 INFO [RS:0;e2eaa0f11f7e:32859 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2eaa0f11f7e%2C32859%2C1733110990032:(num 1733110990832) 2024-12-02T03:43:12,609 DEBUG [RS:0;e2eaa0f11f7e:32859 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,609 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,609 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:12,610 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:12,610 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:12,610 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T03:43:12,610 INFO [RS:0;e2eaa0f11f7e:32859 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32859 2024-12-02T03:43:12,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:12,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,32859,1733110990032 2024-12-02T03:43:12,632 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:12,641 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,32859,1733110990032] 2024-12-02T03:43:12,649 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,32859,1733110990032 already deleted, retry=false 2024-12-02T03:43:12,649 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,32859,1733110990032 expired; onlineServers=1 2024-12-02T03:43:12,688 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T03:43:12,688 INFO [regionserver/e2eaa0f11f7e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T03:43:12,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,741 INFO [RS:0;e2eaa0f11f7e:32859 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:12,741 INFO [RS:0;e2eaa0f11f7e:32859 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,32859,1733110990032; zookeeper connection closed. 
2024-12-02T03:43:12,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32859-0x101956c96d80001, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,741 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@71f71f44 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@71f71f44 2024-12-02T03:43:12,808 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/oldWALs 2024-12-02T03:43:12,808 INFO [RS:1;e2eaa0f11f7e:36583 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2eaa0f11f7e%2C36583%2C1733110990074:(num 1733110990825) 2024-12-02T03:43:12,808 DEBUG [RS:1;e2eaa0f11f7e:36583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.ChoreService(370): Chore service for: regionserver/e2eaa0f11f7e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T03:43:12,809 INFO [regionserver/e2eaa0f11f7e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:12,809 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T03:43:12,810 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:12,810 INFO [RS:1;e2eaa0f11f7e:36583 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36583 2024-12-02T03:43:12,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2eaa0f11f7e,36583,1733110990074 2024-12-02T03:43:12,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T03:43:12,842 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:12,869 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2eaa0f11f7e,36583,1733110990074] 2024-12-02T03:43:12,882 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2eaa0f11f7e,36583,1733110990074 already deleted, retry=false 2024-12-02T03:43:12,882 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2eaa0f11f7e,36583,1733110990074 expired; onlineServers=0 2024-12-02T03:43:12,882 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2eaa0f11f7e,34035,1733110989868' ***** 2024-12-02T03:43:12,882 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T03:43:12,883 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T03:43:12,883 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T03:43:12,883 DEBUG [M:0;e2eaa0f11f7e:34035 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T03:43:12,883 DEBUG [M:0;e2eaa0f11f7e:34035 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T03:43:12,883 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T03:43:12,883 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110990490 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.large.0-1733110990490,5,FailOnTimeoutGroup] 2024-12-02T03:43:12,883 DEBUG [master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110990490 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2eaa0f11f7e:0:becomeActiveMaster-HFileCleaner.small.0-1733110990490,5,FailOnTimeoutGroup] 2024-12-02T03:43:12,883 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.ChoreService(370): Chore service for: master/e2eaa0f11f7e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T03:43:12,884 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T03:43:12,884 DEBUG [M:0;e2eaa0f11f7e:34035 {}] master.HMaster(1795): Stopping service threads 2024-12-02T03:43:12,884 INFO [M:0;e2eaa0f11f7e:34035 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T03:43:12,884 INFO [M:0;e2eaa0f11f7e:34035 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T03:43:12,884 INFO [M:0;e2eaa0f11f7e:34035 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T03:43:12,885 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T03:43:12,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T03:43:12,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T03:43:12,895 DEBUG [M:0;e2eaa0f11f7e:34035 {}] zookeeper.ZKUtil(347): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T03:43:12,895 WARN [M:0;e2eaa0f11f7e:34035 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T03:43:12,896 INFO [M:0;e2eaa0f11f7e:34035 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/.lastflushedseqids 2024-12-02T03:43:12,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741843_1019 (size=127) 2024-12-02T03:43:12,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741843_1019 (size=127) 2024-12-02T03:43:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741843_1019 (size=127) 2024-12-02T03:43:12,910 INFO [M:0;e2eaa0f11f7e:34035 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T03:43:12,910 INFO [M:0;e2eaa0f11f7e:34035 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T03:43:12,910 DEBUG 
[M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T03:43:12,910 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:12,910 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:12,911 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T03:43:12,911 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:12,911 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-02T03:43:12,931 DEBUG [M:0;e2eaa0f11f7e:34035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01f1db051f004f5c8034f6b3d5984715 is 82, key is hbase:meta,,1/info:regioninfo/1733110991192/Put/seqid=0 2024-12-02T03:43:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741844_1020 (size=5672) 2024-12-02T03:43:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741844_1020 (size=5672) 2024-12-02T03:43:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741844_1020 (size=5672) 2024-12-02T03:43:12,939 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01f1db051f004f5c8034f6b3d5984715 2024-12-02T03:43:12,961 DEBUG [M:0;e2eaa0f11f7e:34035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20176fb78ff841bdb281f9353eccf818 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733110991776/Put/seqid=0 2024-12-02T03:43:12,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741845_1021 (size=6438) 2024-12-02T03:43:12,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741845_1021 (size=6438) 2024-12-02T03:43:12,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741845_1021 (size=6438) 2024-12-02T03:43:12,968 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20176fb78ff841bdb281f9353eccf818 2024-12-02T03:43:12,969 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,969 INFO [RS:1;e2eaa0f11f7e:36583 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:12,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36583-0x101956c96d80002, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:12,969 INFO [RS:1;e2eaa0f11f7e:36583 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2eaa0f11f7e,36583,1733110990074; zookeeper connection closed. 2024-12-02T03:43:12,970 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4c8aa3b6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4c8aa3b6 2024-12-02T03:43:12,970 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-02T03:43:12,988 DEBUG [M:0;e2eaa0f11f7e:34035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a23f13b785ea474d80dedef9b2b90919 is 69, key is e2eaa0f11f7e,32859,1733110990032/rs:state/1733110990613/Put/seqid=0 2024-12-02T03:43:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741846_1022 (size=5294) 2024-12-02T03:43:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741846_1022 (size=5294) 2024-12-02T03:43:12,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741846_1022 (size=5294) 2024-12-02T03:43:12,997 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a23f13b785ea474d80dedef9b2b90919 2024-12-02T03:43:13,004 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01f1db051f004f5c8034f6b3d5984715 as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01f1db051f004f5c8034f6b3d5984715 2024-12-02T03:43:13,011 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01f1db051f004f5c8034f6b3d5984715, entries=8, sequenceid=72, filesize=5.5 K 2024-12-02T03:43:13,012 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20176fb78ff841bdb281f9353eccf818 as 
hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/20176fb78ff841bdb281f9353eccf818 2024-12-02T03:43:13,019 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/20176fb78ff841bdb281f9353eccf818, entries=8, sequenceid=72, filesize=6.3 K 2024-12-02T03:43:13,020 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a23f13b785ea474d80dedef9b2b90919 as hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a23f13b785ea474d80dedef9b2b90919 2024-12-02T03:43:13,025 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40451/user/jenkins/test-data/95533f63-1347-7cad-daa5-b300334aa4f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a23f13b785ea474d80dedef9b2b90919, entries=3, sequenceid=72, filesize=5.2 K 2024-12-02T03:43:13,027 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=72, compaction requested=false 2024-12-02T03:43:13,028 INFO [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T03:43:13,028 DEBUG [M:0;e2eaa0f11f7e:34035 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733110992910Disabling compacts and flushes for region at 1733110992910Disabling writes for close at 1733110992911 (+1 ms)Obtaining lock to block concurrent updates at 1733110992911Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733110992911Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733110992911Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733110992913 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733110992913Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733110992931 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733110992931Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733110992945 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733110992960 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733110992960Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733110992973 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733110992988 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733110992988Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8f3718f: reopening flushed file at 1733110993003 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5229037e: reopening flushed file at 1733110993011 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fd1ab1e: reopening flushed file at 1733110993019 (+8 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=72, compaction requested=false at 1733110993027 (+8 ms)Writing region close event to WAL at 1733110993028 (+1 ms)Closed at 1733110993028 2024-12-02T03:43:13,029 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:13,029 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:13,029 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:13,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:13,029 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T03:43:13,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40671 is added to blk_1073741830_1006 (size=32662) 2024-12-02T03:43:13,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39403 is added to blk_1073741830_1006 (size=32662) 2024-12-02T03:43:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40641 is added to blk_1073741830_1006 (size=32662) 2024-12-02T03:43:13,033 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T03:43:13,033 INFO [M:0;e2eaa0f11f7e:34035 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T03:43:13,033 INFO [M:0;e2eaa0f11f7e:34035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34035 2024-12-02T03:43:13,033 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T03:43:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:13,149 INFO [M:0;e2eaa0f11f7e:34035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T03:43:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34035-0x101956c96d80000, quorum=127.0.0.1:56492, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T03:43:13,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e89cb0b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:13,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:13,152 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:13,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:13,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:13,153 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:13,153 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:13,153 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:13,153 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1681032769-172.17.0.2-1733110987988 (Datanode Uuid 45bd36b6-b207-4167-9c9f-110c8311064a) service to localhost/127.0.0.1:40451 2024-12-02T03:43:13,154 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data5/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,154 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data6/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,154 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:13,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:13,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:13,157 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:13,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:13,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:13,158 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:13,158 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:13,158 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:13,158 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1681032769-172.17.0.2-1733110987988 (Datanode Uuid 0304e183-0ba4-45f7-8857-2c1b05faab38) service to localhost/127.0.0.1:40451 2024-12-02T03:43:13,159 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data3/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,159 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data4/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,159 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:13,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@700f39d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T03:43:13,161 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:13,161 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:13,161 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:13,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:13,163 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T03:43:13,163 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T03:43:13,163 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T03:43:13,163 WARN [BP-1681032769-172.17.0.2-1733110987988 heartbeating to localhost/127.0.0.1:40451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1681032769-172.17.0.2-1733110987988 (Datanode Uuid 1be90b24-094d-4fd7-bdba-852cef2a1e5f) service to localhost/127.0.0.1:40451 2024-12-02T03:43:13,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data1/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/cluster_1818e1a0-9560-afa9-e1ef-707f3c5f44ca/data/data2/current/BP-1681032769-172.17.0.2-1733110987988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T03:43:13,164 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T03:43:13,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ffa125c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T03:43:13,169 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T03:43:13,169 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T03:43:13,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T03:43:13,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c2a70ae2-417f-19d4-9b2e-705d583b69bc/hadoop.log.dir/,STOPPED} 2024-12-02T03:43:13,176 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T03:43:13,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T03:43:13,205 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=151 (was 92) - Thread LEAK? -, OpenFileDescriptor=516 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=200 (was 182) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7869 (was 8042)